ANDROID: binder: remove proc waitqueue
author		Martijn Coenen <maco@google.com>
		Fri, 2 Jun 2017 18:15:44 +0000 (11:15 -0700)
committer	Danny Wood <danwood76@gmail.com>
		Tue, 26 Feb 2019 11:43:43 +0000 (11:43 +0000)
Removes the process waitqueue, so that threads
can only wait on the thread waitqueue. Whenever
there is process work to do, pick a thread and
wake it up.

This also fixes an issue with using epoll(),
since we no longer have to block on different
waitqueues.
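
The scheme can be modeled in ordinary user-space C: give every worker
its own condition variable (the analogue of thread->wait), keep a list
of idle workers (the analogue of proc->waiting_threads), and have the
producer wake exactly one of them. The sketch below is illustrative
only, not driver code; every name in it (worker_loop, wakeup_proc,
idle_stack, pending) is invented for the example.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t worker_wait[NWORKERS];	/* one waitqueue per thread */
static int idle_stack[NWORKERS], nidle;		/* models proc->waiting_threads */
static int pending[NWORKERS];			/* models thread->todo */

static void *worker_loop(void *arg)
{
	int me = *(int *)arg;

	pthread_mutex_lock(&inner_lock);
	for (;;) {
		idle_stack[nidle++] = me;	/* register as ready for proc work */
		while (!pending[me])
			pthread_cond_wait(&worker_wait[me], &inner_lock);
		pending[me] = 0;
		pthread_mutex_unlock(&inner_lock);
		printf("worker %d: handling one work item\n", me);
		pthread_mutex_lock(&inner_lock);
	}
	return NULL;
}

/* Producer side: pick exactly one idle worker and wake only its queue,
 * the analogue of binder_wakeup_proc_ilocked() finding a waiter. */
static void wakeup_proc(void)
{
	pthread_mutex_lock(&inner_lock);
	if (nidle > 0) {
		int victim = idle_stack[--nidle];

		pending[victim] = 1;
		pthread_cond_signal(&worker_wait[victim]);
	}
	/* else: every worker is busy; a busy worker will look for more
	 * work when it finishes its current item. */
	pthread_mutex_unlock(&inner_lock);
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int ids[NWORKERS];

	for (int i = 0; i < NWORKERS; i++) {
		ids[i] = i;
		pthread_cond_init(&worker_wait[i], NULL);
		pthread_create(&tid[i], NULL, worker_loop, &ids[i]);
	}
	for (int i = 0; i < 8; i++) {
		wakeup_proc();
		usleep(10000);
	}
	return 0;	/* demo only: workers loop until the process exits */
}

Unlike this toy, binder keeps un-consumed work queued on proc->todo
rather than dropping it, which is what the "all threads are busy"
comment in the new binder_wakeup_proc_ilocked() helper refers to.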

Bug: 34461621
Change-Id: I2950b9de6fa078ee72d53c667a03cbaf587f0849
Signed-off-by: Martijn Coenen <maco@google.com>
drivers/android/binder.c

index ca91c70faf3f2a7ae2bbe88e4d7200386c5cea78..26be7ceeb10407ffc7d1758d76062c4ebdf21adb 100644
  *    binder_node_lock() and binder_node_unlock() are
  *    used to acq/rel
  * 3) proc->inner_lock : protects the thread and node lists
- *    (proc->threads, proc->nodes) and all todo lists associated
- *    with the binder_proc (proc->todo, thread->todo,
- *    proc->delivered_death and node->async_todo), as well as
- *    thread->transaction_stack
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
  *    are used to acq/rel
  *
@@ -477,6 +477,8 @@ enum binder_deferred_state {
  *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
  *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -506,8 +508,6 @@ enum binder_deferred_state {
  *                        (protected by @inner_lock)
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
- * @ready_threads:        number of threads waiting for proc work
- *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
  *                        (protected by @inner_lock)
  * @default_priority:     default scheduler priority
@@ -528,6 +528,7 @@ struct binder_proc {
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
+       struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
@@ -542,7 +543,6 @@ struct binder_proc {
        int max_threads;
        int requested_threads;
        int requested_threads_started;
-       int ready_threads;
        int tmp_ref;
        long default_priority;
        struct dentry *debugfs_entry;
@@ -558,6 +558,7 @@ enum {
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
+       BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
 /**
@@ -566,6 +567,8 @@ enum {
  *                        (invariant after initialization)
  * @rb_node:              element for proc->threads rbtree
  *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
  * @pid:                  PID for this thread
  *                        (invariant after initialization)
  * @looper:               bitmap of looping state
@@ -595,6 +598,7 @@ enum {
 struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
+       struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
@@ -922,6 +926,86 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
        return retval;
 }
 
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+                                   bool do_proc_work)
+{
+       return !binder_worklist_empty_ilocked(&thread->todo) ||
+               thread->looper_need_return ||
+               (do_proc_work &&
+                !binder_worklist_empty_ilocked(&thread->proc->todo));
+}
+
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
+{
+       bool has_work;
+
+       binder_inner_proc_lock(thread->proc);
+       has_work = binder_has_work_ilocked(thread, do_proc_work);
+       binder_inner_proc_unlock(thread->proc);
+
+       return has_work;
+}
+
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
+{
+       return !thread->transaction_stack &&
+               binder_worklist_empty_ilocked(&thread->todo) &&
+               (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+                                  BINDER_LOOPER_STATE_REGISTERED));
+}
+
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+                                              bool sync)
+{
+       struct rb_node *n;
+       struct binder_thread *thread;
+
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+               thread = rb_entry(n, struct binder_thread, rb_node);
+               if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+                   binder_available_for_proc_work_ilocked(thread)) {
+                       if (sync)
+                               wake_up_interruptible_sync(&thread->wait);
+                       else
+                               wake_up_interruptible(&thread->wait);
+               }
+       }
+}
+
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync)
+{
+       struct binder_thread *thread;
+
+       BUG_ON(!spin_is_locked(&proc->inner_lock));
+       thread = list_first_entry_or_null(&proc->waiting_threads,
+                                         struct binder_thread,
+                                         waiting_thread_node);
+
+       if (thread) {
+               list_del_init(&thread->waiting_thread_node);
+               if (sync)
+                       wake_up_interruptible_sync(&thread->wait);
+               else
+                       wake_up_interruptible(&thread->wait);
+               return;
+       }
+
+       /* Didn't find a thread waiting for proc work; this can happen
+        * in two scenarios:
+        * 1. All threads are busy handling transactions
+        *    In that case, one of those threads should call back into
+        *    the kernel driver soon and pick up this work.
+        * 2. Threads are using the (e)poll interface, in which case
+        *    they may be blocked on the waitqueue without having been
+        *    added to waiting_threads. For this case, we just iterate
+        *    over all threads not handling transaction work, and
+        *    wake them all up. We wake all because we don't know whether
+        *    a thread that called into (e)poll is handling non-binder
+        *    work currently.
+        */
+       binder_wakeup_poll_threads_ilocked(proc, sync);
+}
+
 static void binder_set_nice(long nice)
 {
        long min_nice;
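
Two details in the helpers above are worth noting. First, the poll
fallback wakes every thread not currently handling a transaction,
because a thread blocked in (e)poll never put itself on
waiting_threads. Second, the sync flag picks between the two wakeup
flavours; a minimal sketch of that pattern (hypothetical my_* names,
not driver code):

#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void my_wake_one(bool sync)
{
	if (sync)
		/* The waker is about to block (e.g. waiting for a reply),
		 * so hint the scheduler to run the wakee in its place. */
		wake_up_interruptible_sync(&my_wq);
	else
		/* One-way work: plain wakeup, no scheduler hint. */
		wake_up_interruptible(&my_wq);
}
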
@@ -1141,7 +1225,7 @@ static bool binder_dec_node_nilocked(struct binder_node *node,
        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
-                       wake_up_interruptible(&node->proc->wait);
+                       binder_wakeup_proc_ilocked(proc, false);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
@@ -2402,7 +2486,6 @@ static void binder_transaction(struct binder_proc *proc,
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct list_head *target_list;
-       wait_queue_head_t *target_wait;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error = 0;
@@ -2412,6 +2495,7 @@ static void binder_transaction(struct binder_proc *proc,
        binder_size_t last_fixup_min_off = 0;
        struct binder_context *context = proc->context;
        int t_debug_id = atomic_inc_return(&binder_last_id);
+       bool wakeup_for_proc_work = false;
 
        e = binder_transaction_log_add(&binder_transaction_log);
        e->debug_id = t_debug_id;
@@ -2575,10 +2659,9 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_thread) {
                e->to_thread = target_thread->pid;
                target_list = &target_thread->todo;
-               target_wait = &target_thread->wait;
        } else {
                target_list = &target_proc->todo;
-               target_wait = &target_proc->wait;
+               wakeup_for_proc_work = true;
        }
        e->to_proc = target_proc->pid;
 
@@ -2885,7 +2968,7 @@ static void binder_transaction(struct binder_proc *proc,
                binder_node_lock(target_node);
                if (target_node->has_async_transaction) {
                        target_list = &target_node->async_todo;
-                       target_wait = NULL;
+                       wakeup_for_proc_work = false;
                } else
                        target_node->has_async_transaction = 1;
                /*
@@ -2904,12 +2987,13 @@ static void binder_transaction(struct binder_proc *proc,
                binder_inner_proc_unlock(target_proc);
                binder_node_unlock(target_node);
        }
-       if (target_wait) {
-               if (reply || !(tr->flags & TF_ONE_WAY)) {
-                       wake_up_interruptible_sync(target_wait);
-               } else {
-                       wake_up_interruptible(target_wait);
-               }
+       if (target_thread) {
+               wake_up_interruptible_sync(&target_thread->wait);
+       } else if (wakeup_for_proc_work) {
+               binder_inner_proc_lock(target_proc);
+               binder_wakeup_proc_ilocked(target_proc,
+                                          !(tr->flags & TF_ONE_WAY));
+               binder_inner_proc_unlock(target_proc);
        }
        if (target_thread)
                binder_thread_dec_tmpref(target_thread);
@@ -3349,12 +3433,14 @@ int binder_thread_write(struct binder_proc *proc,
                                                        &ref->death->work,
                                                        &thread->todo);
                                        else {
-                                               binder_enqueue_work(
-                                                       proc,
+                                               binder_inner_proc_lock(proc);
+                                               binder_enqueue_work_ilocked(
                                                        &ref->death->work,
                                                        &proc->todo);
-                                               wake_up_interruptible(
-                                                               &proc->wait);
+                                               binder_wakeup_proc_ilocked(
+                                                       proc,
+                                                       false);
+                                               binder_inner_proc_unlock(proc);
                                        }
                                }
                        } else {
@@ -3389,8 +3475,9 @@ int binder_thread_write(struct binder_proc *proc,
                                                binder_enqueue_work_ilocked(
                                                                &death->work,
                                                                &proc->todo);
-                                               wake_up_interruptible(
-                                                               &proc->wait);
+                                               binder_wakeup_proc_ilocked(
+                                                               proc,
+                                                               false);
                                        }
                                } else {
                                        BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
@@ -3445,7 +3532,8 @@ int binder_thread_write(struct binder_proc *proc,
                                        binder_enqueue_work_ilocked(
                                                        &death->work,
                                                        &proc->todo);
-                                       wake_up_interruptible(&proc->wait);
+                                       binder_wakeup_proc_ilocked(
+                                                       proc, false);
                                }
                        }
                        binder_inner_proc_unlock(proc);
@@ -3472,13 +3560,6 @@ static void binder_stat_br(struct binder_proc *proc,
        }
 }
 
-static int binder_has_proc_work(struct binder_proc *proc,
-                               struct binder_thread *thread)
-{
-       return !binder_worklist_empty(proc, &proc->todo) ||
-               thread->looper_need_return;
-}
-
 static int binder_has_thread_work(struct binder_thread *thread)
 {
        return !binder_worklist_empty(thread->proc, &thread->todo) ||
@@ -3516,6 +3597,38 @@ static int binder_put_node_cmd(struct binder_proc *proc,
        return 0;
 }
 
+static int binder_wait_for_work(struct binder_thread *thread,
+                               bool do_proc_work)
+{
+       DEFINE_WAIT(wait);
+       struct binder_proc *proc = thread->proc;
+       int ret = 0;
+
+       freezer_do_not_count();
+       binder_inner_proc_lock(proc);
+       for (;;) {
+               prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+               if (binder_has_work_ilocked(thread, do_proc_work))
+                       break;
+               if (do_proc_work)
+                       list_add(&thread->waiting_thread_node,
+                                &proc->waiting_threads);
+               binder_inner_proc_unlock(proc);
+               schedule();
+               binder_inner_proc_lock(proc);
+               list_del_init(&thread->waiting_thread_node);
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+       }
+       finish_wait(&thread->wait, &wait);
+       binder_inner_proc_unlock(proc);
+       freezer_count();
+
+       return ret;
+}
+
 static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              binder_uintptr_t binder_buffer, size_t size,
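
binder_wait_for_work() above is the standard open-coded wait loop; only
the conditional self-registration on proc->waiting_threads is binder
specific. The generic idiom it follows looks like this (a sketch with
hypothetical my_* names; the driver additionally brackets the loop with
freezer_do_not_count()/freezer_count()):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static DEFINE_SPINLOCK(my_lock);
static bool my_cond;

static int my_wait(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&my_lock);
	for (;;) {
		/* Set the task state *before* testing the condition so a
		 * wakeup arriving in between is not lost. */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		spin_unlock(&my_lock);
		schedule();
		spin_lock(&my_lock);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&my_wq, &wait);
	spin_unlock(&my_lock);
	return ret;
}
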
@@ -3536,10 +3649,7 @@ static int binder_thread_read(struct binder_proc *proc,
 
 retry:
        binder_inner_proc_lock(proc);
-       wait_for_proc_work = thread->transaction_stack == NULL &&
-               binder_worklist_empty_ilocked(&thread->todo);
-       if (wait_for_proc_work)
-               proc->ready_threads++;
+       wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
        binder_inner_proc_unlock(proc);
 
        thread->looper |= BINDER_LOOPER_STATE_WAITING;
@@ -3556,23 +3666,15 @@ retry:
                                                 binder_stop_on_user_error < 2);
                }
                binder_set_nice(proc->default_priority);
-               if (non_block) {
-                       if (!binder_has_proc_work(proc, thread))
-                               ret = -EAGAIN;
-               } else
-                       ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+       }
+
+       if (non_block) {
+               if (!binder_has_work(thread, wait_for_proc_work))
+                       ret = -EAGAIN;
        } else {
-               if (non_block) {
-                       if (!binder_has_thread_work(thread))
-                               ret = -EAGAIN;
-               } else
-                       ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+               ret = binder_wait_for_work(thread, wait_for_proc_work);
        }
 
-       binder_inner_proc_lock(proc);
-       if (wait_for_proc_work)
-               proc->ready_threads--;
-       binder_inner_proc_unlock(proc);
        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
 
        if (ret)
@@ -3858,7 +3960,8 @@ done:
 
        *consumed = ptr - buffer;
        binder_inner_proc_lock(proc);
-       if (proc->requested_threads + proc->ready_threads == 0 &&
+       if (proc->requested_threads == 0 &&
+           list_empty(&thread->proc->waiting_threads) &&
            proc->requested_threads_started < proc->max_threads &&
            (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
             BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
@@ -3969,7 +4072,7 @@ static struct binder_thread *binder_get_thread_ilocked(
        thread->return_error.cmd = BR_OK;
        thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
        thread->reply_error.cmd = BR_OK;
-
+       INIT_LIST_HEAD(&new_thread->waiting_thread_node);
        return thread;
 }
 
@@ -4082,28 +4185,24 @@ static unsigned int binder_poll(struct file *filp,
 {
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread = NULL;
-       int wait_for_proc_work;
+       bool wait_for_proc_work;
 
        thread = binder_get_thread(proc);
 
        binder_inner_proc_lock(thread->proc);
-       wait_for_proc_work = thread->transaction_stack == NULL &&
-               binder_worklist_empty_ilocked(&thread->todo);
+       thread->looper |= BINDER_LOOPER_STATE_POLL;
+       wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+
        binder_inner_proc_unlock(thread->proc);
 
-       if (wait_for_proc_work) {
-               if (binder_has_proc_work(proc, thread))
-                       return POLLIN;
-               poll_wait(filp, &proc->wait, wait);
-               if (binder_has_proc_work(proc, thread))
-                       return POLLIN;
-       } else {
-               if (binder_has_thread_work(thread))
-                       return POLLIN;
-               poll_wait(filp, &thread->wait, wait);
-               if (binder_has_thread_work(thread))
-                       return POLLIN;
-       }
+       if (binder_has_work(thread, wait_for_proc_work))
+               return POLLIN;
+
+       poll_wait(filp, &thread->wait, wait);
+
+       if (binder_has_thread_work(thread))
+               return POLLIN;
+
        return 0;
 }
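
The epoll fix falls out of having a single waitqueue: poll_wait() is
now always called on &thread->wait, so an epoll registration (which,
unlike a plain poll()/select() cycle, persists across calls) can no
longer be left pointing at proc->wait while wakeups arrive on
thread->wait, or vice versa. The user-space pattern this makes reliable
is the usual one; a hypothetical, heavily trimmed example (a real
client must also mmap the fd and speak the BINDER_WRITE_READ protocol,
and all error handling is elided):

#include <sys/epoll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	int epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = binder_fd };
	struct epoll_event out;

	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
	if (epoll_wait(epfd, &out, 1, 1000) > 0)
		printf("binder: work available\n");

	close(epfd);
	close(binder_fd);
	return 0;
}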
 
@@ -4150,8 +4249,10 @@ static int binder_ioctl_write_read(struct file *filp,
                                         &bwr.read_consumed,
                                         filp->f_flags & O_NONBLOCK);
                trace_binder_read_done(ret);
-               if (!binder_worklist_empty(proc, &proc->todo))
-                       wake_up_interruptible(&proc->wait);
+               binder_inner_proc_lock(proc);
+               if (!binder_worklist_empty_ilocked(&proc->todo))
+                       binder_wakeup_proc_ilocked(proc, false);
+               binder_inner_proc_unlock(proc);
                if (ret < 0) {
                        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                                ret = -EFAULT;
@@ -4391,7 +4492,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
        get_task_struct(current->group_leader);
        proc->tsk = current->group_leader;
        INIT_LIST_HEAD(&proc->todo);
-       init_waitqueue_head(&proc->wait);
        proc->default_priority = task_nice(current);
        binder_dev = container_of(filp->private_data, struct binder_device,
                                  miscdev);
@@ -4401,6 +4501,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
        binder_stats_created(BINDER_STAT_PROC);
        proc->pid = current->group_leader->pid;
        INIT_LIST_HEAD(&proc->delivered_death);
+       INIT_LIST_HEAD(&proc->waiting_threads);
        filp->private_data = proc;
 
        mutex_lock(&binder_procs_lock);
@@ -4452,7 +4553,6 @@ static void binder_deferred_flush(struct binder_proc *proc)
                }
        }
        binder_inner_proc_unlock(proc);
-       wake_up_interruptible_all(&proc->wait);
 
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "binder_flush: %d woke %d threads\n", proc->pid,
@@ -4521,7 +4621,7 @@ static int binder_node_release(struct binder_node *node, int refs)
                ref->death->work.type = BINDER_WORK_DEAD_BINDER;
                binder_enqueue_work_ilocked(&ref->death->work,
                                            &ref->proc->todo);
-               wake_up_interruptible(&ref->proc->wait);
+               binder_wakeup_proc_ilocked(ref->proc, false);
                binder_inner_proc_unlock(ref->proc);
        }
 
@@ -5009,23 +5109,29 @@ static void print_binder_proc_stats(struct seq_file *m,
                                    struct binder_proc *proc)
 {
        struct binder_work *w;
+       struct binder_thread *thread;
        struct rb_node *n;
-       int count, strong, weak;
+       int count, strong, weak, ready_threads;
        size_t free_async_space =
                binder_alloc_get_free_async_space(&proc->alloc);
 
        seq_printf(m, "proc %d\n", proc->pid);
        seq_printf(m, "context %s\n", proc->context->name);
        count = 0;
+       ready_threads = 0;
        binder_inner_proc_lock(proc);
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
                count++;
+
+       list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+               ready_threads++;
+
        seq_printf(m, "  threads: %d\n", count);
        seq_printf(m, "  requested threads: %d+%d/%d\n"
                        "  ready threads %d\n"
                        "  free async space %zd\n", proc->requested_threads,
                        proc->requested_threads_started, proc->max_threads,
-                       proc->ready_threads,
+                       ready_threads,
                        free_async_space);
        count = 0;
        for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))