{
struct binder_thread *thread;
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
thread = list_first_entry_or_null(&proc->waiting_threads,
struct binder_thread,
waiting_thread_node);
@@ ... @@ binder_wakeup_thread_ilocked
struct binder_thread *thread,
bool sync)
{
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
if (thread) {
if (sync)
@@ ... @@ binder_get_node_ilocked
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ ... @@ binder_init_node_ilocked
__u32 flags = fp ? fp->flags : 0;
s8 priority;
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
+
while (*p) {
parent = *p;
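The helpers changed above follow binder's locking-suffix convention: a function whose name ends in _ilocked must be entered with proc->inner_lock already held, and its first statement asserts that. Below is a minimal sketch of the same convention using hypothetical foo_* names (not binder's own structures, and not part of the patch). The likely reason for dropping the open-coded check: spin_is_locked() may return false on uniprocessor builds even while the lock is held, so BUG_ON(!spin_is_locked(...)) is not a reliable "caller holds this lock" assertion, while assert_spin_locked() is the helper the spinlock API provides for that check.

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_queue {
        spinlock_t lock;                /* protects @items */
        struct list_head items;
};

struct foo_item {
        struct list_head node;
};

/* Caller must hold foo->lock; the "_ilocked" suffix documents that. */
static struct foo_item *foo_peek_ilocked(struct foo_queue *foo)
{
        /* Assert the locking rule instead of open-coding spin_is_locked(). */
        assert_spin_locked(&foo->lock);

        return list_first_entry_or_null(&foo->items, struct foo_item, node);
}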
@@ ... @@ binder_inc_node_nilocked
{
struct binder_proc *proc = node->proc;
- BUG_ON(!spin_is_locked(&node->lock));
+ assert_spin_locked(&node->lock);
if (proc)
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ ... @@ binder_dec_node_nilocked
{
struct binder_proc *proc = node->proc;
- BUG_ON(!spin_is_locked(&node->lock));
+ assert_spin_locked(&node->lock);
if (proc)
- BUG_ON(!spin_is_locked(&proc->inner_lock));
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
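The two hunks above are the _nilocked variants, which assert two locks: the node's own lock unconditionally, and the owning proc's inner lock only while the node is still attached to a proc. A short sketch of that conditional pattern, again with hypothetical bar_* names rather than binder's structures:

#include <linux/spinlock.h>

struct bar_owner {
        spinlock_t inner_lock;
};

struct bar_node {
        spinlock_t lock;
        struct bar_owner *owner;        /* NULL once the owner has gone away */
        int strong_refs;
};

/* Caller must hold node->lock, plus owner->inner_lock when owner != NULL. */
static void bar_node_inc_nilocked(struct bar_node *node)
{
        assert_spin_locked(&node->lock);
        if (node->owner)
                assert_spin_locked(&node->owner->inner_lock);

        node->strong_refs++;
}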
@@ ... @@ binder_pop_transaction_ilocked
struct binder_transaction *t)
{
BUG_ON(!target_thread);
- BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock));
+ assert_spin_locked(&target_thread->proc->inner_lock);
BUG_ON(target_thread->transaction_stack != t);
BUG_ON(target_thread->transaction_stack->from != target_thread);
target_thread->transaction_stack =
@@ ... @@ print_binder_transaction_ilocked
struct binder_proc *to_proc;
struct binder_buffer *buffer = t->buffer;
- WARN_ON(!spin_is_locked(&proc->inner_lock));
spin_lock(&t->lock);
to_proc = t->to_proc;
@@ ... @@ print_binder_thread_ilocked
seq_printf(m,
size_t start_pos = m->count;
size_t header_pos;
- WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
thread->pid, thread->looper,
thread->looper_need_return,
@@ ... @@ print_binder_node_nilocked
struct binder_work *w;
int count;
- WARN_ON(!spin_is_locked(&node->lock));
- if (node->proc)
- WARN_ON(!spin_is_locked(&node->proc->inner_lock));
-
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
@@ ... @@
static void print_binder_ref_olocked(struct seq_file *m,
struct binder_ref *ref)
{
- WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
binder_node_lock(ref->node);
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
ref->data.debug_id, ref->data.desc,
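For completeness, a sketch of the caller side of the suffix convention, reusing the hypothetical foo_queue example from earlier (not part of the patch): the caller acquires the lock, calls the _ilocked helper, and releases the lock, so the assertion only trips when a caller breaks the locking rule.

/* Hypothetical caller, continuing the foo_queue sketch above. */
static struct foo_item *foo_pop(struct foo_queue *foo)
{
        struct foo_item *item;

        spin_lock(&foo->lock);
        item = foo_peek_ilocked(foo);   /* assertion is satisfied here */
        if (item)
                list_del(&item->node);
        spin_unlock(&foo->lock);

        return item;
}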