From: Martijn Coenen
Date: Thu, 19 Oct 2017 13:04:46 +0000 (+0200)
Subject: ANDROID: binder: Add thread->process_todo flag.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=3fb0a5b65dd64f6dfe679b99343195018372c08f;p=GitHub%2FLineageOS%2Fandroid_kernel_samsung_universal7580.git

ANDROID: binder: Add thread->process_todo flag.

This flag determines whether the thread should currently process the
work in the thread->todo worklist.

The prime use case for this is improving the performance of synchronous
transactions: all synchronous transactions post a BR_TRANSACTION_COMPLETE
to the calling thread, but there's no reason to return that command to
userspace right away - userspace needs to wait for the reply anyway.

Likewise, a synchronous transaction that contains a binder object can
cause BR_ACQUIRE/BR_INCREFS commands to be returned to userspace; since
the caller must hold a strong/weak ref for the duration of the call
anyway, postponing these commands until the reply comes in is not a
problem.

Note that this flag is not used to determine whether a thread can handle
process work; a thread should never pick up process work when thread
work is still pending.

Before patch:
------------------------------------------------------------------
Benchmark                  Time           CPU           Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4       45959 ns      20288 ns        34351
BM_sendVec_binderize/8       45603 ns      20080 ns        34909
BM_sendVec_binderize/16      45528 ns      20113 ns        34863
BM_sendVec_binderize/32      45551 ns      20122 ns        34881
BM_sendVec_binderize/64      45701 ns      20183 ns        34864
BM_sendVec_binderize/128     45824 ns      20250 ns        34576
BM_sendVec_binderize/256     45695 ns      20171 ns        34759
BM_sendVec_binderize/512     45743 ns      20211 ns        34489
BM_sendVec_binderize/1024    46169 ns      20430 ns        34081

After patch:
------------------------------------------------------------------
Benchmark                  Time           CPU           Iterations
------------------------------------------------------------------
BM_sendVec_binderize/4       42939 ns      17262 ns        40653
BM_sendVec_binderize/8       42823 ns      17243 ns        40671
BM_sendVec_binderize/16      42898 ns      17243 ns        40594
BM_sendVec_binderize/32      42838 ns      17267 ns        40527
BM_sendVec_binderize/64      42854 ns      17249 ns        40379
BM_sendVec_binderize/128     42881 ns      17288 ns        40427
BM_sendVec_binderize/256     42917 ns      17297 ns        40429
BM_sendVec_binderize/512     43184 ns      17395 ns        40411
BM_sendVec_binderize/1024    43119 ns      17357 ns        40432

Signed-off-by: Martijn Coenen
Change-Id: Ia70287066d62aba64e98ac44ff1214e37ca75693
---

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 796b4297ddd..735782b6a51 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -608,6 +608,8 @@ enum {
  *                        (protected by @proc->inner_lock)
  * @todo:                 list of work to do for this thread
  *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
  * @return_error:         transaction errors reported by this thread
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
@@ -634,6 +636,7 @@ struct binder_thread {
         bool looper_need_return; /* can be written by other thread */
         struct binder_transaction *transaction_stack;
         struct list_head todo;
+        bool process_todo;
         struct binder_error return_error;
         struct binder_error reply_error;
         wait_queue_head_t wait;
@@ -821,6 +824,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
         return ret;
 }
 
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
                             struct list_head *target_list)
@@ -831,22 +844,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }
 
 /**
- * binder_enqueue_work() - Add an item to the work list
- * @proc:         binder_proc associated with list
+ * binder_enqueue_thread_work_ilocked_nowake() - Add thread work
+ * @thread:       thread to queue work to
  * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
  *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
  */
 static void
-binder_enqueue_work(struct binder_proc *proc,
-                    struct binder_work *work,
-                    struct list_head *target_list)
+binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
+                                          struct binder_work *work)
 {
-        binder_inner_proc_lock(proc);
-        binder_enqueue_work_ilocked(work, target_list);
-        binder_inner_proc_unlock(proc);
+        binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+                                   struct binder_work *work)
+{
+        binder_enqueue_work_ilocked(work, &thread->todo);
+        thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+                           struct binder_work *work)
+{
+        binder_inner_proc_lock(thread->proc);
+        binder_enqueue_thread_work_ilocked(thread, work);
+        binder_inner_proc_unlock(thread->proc);
 }
 
 static void
@@ -961,7 +1008,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 static bool binder_has_work_ilocked(struct binder_thread *thread,
                                     bool do_proc_work)
 {
-        return !binder_worklist_empty_ilocked(&thread->todo) ||
+        return thread->process_todo ||
                 thread->looper_need_return ||
                 (do_proc_work &&
                  !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1378,6 +1425,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                         node->local_strong_refs++;
                 if (!node->has_strong_ref && target_list) {
                         binder_dequeue_work_ilocked(&node->work);
+                        /*
+                         * Note: this function is the only place where we queue
+                         * directly to a thread->todo without using the
+                         * corresponding binder_enqueue_thread_work() helper
+                         * functions; in this case it's ok to not set the
+                         * process_todo flag, since we know this node work will
+                         * always be followed by other work that starts queue
+                         * processing: in case of synchronous transactions, a
+                         * BR_REPLY or BR_ERROR; in case of oneway
+                         * transactions, a BR_TRANSACTION_COMPLETE.
+                         */
                         binder_enqueue_work_ilocked(&node->work, target_list);
                 }
         } else {
@@ -1389,6 +1447,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                 node->debug_id);
                         return -EINVAL;
                 }
+                /*
+                 * See comment above
+                 */
                 binder_enqueue_work_ilocked(&node->work, target_list);
         }
 }
@@ -2078,9 +2139,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
                         binder_pop_transaction_ilocked(target_thread, t);
                         if (target_thread->reply_error.cmd == BR_OK) {
                                 target_thread->reply_error.cmd = error_code;
-                                binder_enqueue_work_ilocked(
-                                        &target_thread->reply_error.work,
-                                        &target_thread->todo);
+                                binder_enqueue_thread_work_ilocked(
+                                        target_thread,
+                                        &target_thread->reply_error.work);
                                 wake_up_interruptible(&target_thread->wait);
                         } else {
                                 WARN(1, "Unexpected reply error: %u\n",
@@ -2719,11 +2780,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
                                     struct binder_proc *proc,
                                     struct binder_thread *thread)
 {
-        struct list_head *target_list = NULL;
         struct binder_node *node = t->buffer->target_node;
         struct binder_priority node_prio;
         bool oneway = !!(t->flags & TF_ONE_WAY);
-        bool wakeup = true;
+        bool pending_async = false;
 
         BUG_ON(!node);
         binder_node_lock(node);
@@ -2733,8 +2793,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
         if (oneway) {
                 BUG_ON(thread);
                 if (node->has_async_transaction) {
-                        target_list = &node->async_todo;
-                        wakeup = false;
+                        pending_async = true;
                 } else {
                         node->has_async_transaction = 1;
                 }
@@ -2748,22 +2807,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
                 return false;
         }
 
-        if (!thread && !target_list)
+        if (!thread && !pending_async)
                 thread = binder_select_thread_ilocked(proc);
 
         if (thread) {
-                target_list = &thread->todo;
                 binder_transaction_priority(thread->task, t, node_prio,
                                             node->inherit_rt);
-        } else if (!target_list) {
-                target_list = &proc->todo;
+                binder_enqueue_thread_work_ilocked(thread, &t->work);
+        } else if (!pending_async) {
+                binder_enqueue_work_ilocked(&t->work, &proc->todo);
         } else {
-                BUG_ON(target_list != &node->async_todo);
+                binder_enqueue_work_ilocked(&t->work, &node->async_todo);
         }
 
-        binder_enqueue_work_ilocked(&t->work, target_list);
-
-        if (wakeup)
+        if (!pending_async)
                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
         binder_inner_proc_unlock(proc);
@@ -3265,10 +3322,10 @@ static void binder_transaction(struct binder_proc *proc,
                 }
         }
         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-        binder_enqueue_work(proc, tcomplete, &thread->todo);
         t->work.type = BINDER_WORK_TRANSACTION;
 
         if (reply) {
+                binder_enqueue_thread_work(thread, tcomplete);
                 binder_inner_proc_lock(target_proc);
                 if (target_thread->is_dead) {
                         binder_inner_proc_unlock(target_proc);
@@ -3276,7 +3333,7 @@ static void binder_transaction(struct binder_proc *proc,
                 }
                 BUG_ON(t->buffer->async_transaction != 0);
                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
-                binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+                binder_enqueue_thread_work_ilocked(target_thread, &t->work);
                 binder_inner_proc_unlock(target_proc);
                 wake_up_interruptible_sync(&target_thread->wait);
                 binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3284,6 +3341,7 @@
         } else if (!(t->flags & TF_ONE_WAY)) {
                 BUG_ON(t->buffer->async_transaction != 0);
                 binder_inner_proc_lock(proc);
+                binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
                 t->need_reply = 1;
                 t->from_parent = thread->transaction_stack;
                 thread->transaction_stack = t;
@@ -3297,6 +3355,7 @@ static void binder_transaction(struct binder_proc *proc,
         } else {
                 BUG_ON(target_node == NULL);
                 BUG_ON(t->buffer->async_transaction != 1);
+                binder_enqueue_thread_work(thread, tcomplete);
                 if (!binder_proc_transaction(t, target_proc, NULL))
                         goto err_dead_proc_or_thread;
         }
@@ -3376,15 +3435,11 @@ err_invalid_target_handle:
         if (in_reply_to) {
                 binder_restore_priority(current, in_reply_to->saved_priority);
                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-                binder_enqueue_work(thread->proc,
-                                    &thread->return_error.work,
-                                    &thread->todo);
+                binder_enqueue_thread_work(thread, &thread->return_error.work);
                 binder_send_failed_reply(in_reply_to, return_error);
         } else {
                 thread->return_error.cmd = return_error;
-                binder_enqueue_work(thread->proc,
-                                    &thread->return_error.work,
-                                    &thread->todo);
+                binder_enqueue_thread_work(thread, &thread->return_error.work);
         }
 }
 
@@ -3688,10 +3743,9 @@ int binder_thread_write(struct binder_proc *proc,
                                         WARN_ON(thread->return_error.cmd !=
                                                 BR_OK);
                                         thread->return_error.cmd = BR_ERROR;
-                                        binder_enqueue_work(
-                                                thread->proc,
-                                                &thread->return_error.work,
-                                                &thread->todo);
+                                        binder_enqueue_thread_work(
+                                                thread,
+                                                &thread->return_error.work);
                                         binder_debug(
                                                 BINDER_DEBUG_FAILED_TRANSACTION,
                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3771,9 +3825,9 @@ int binder_thread_write(struct binder_proc *proc,
                                 if (thread->looper &
                                     (BINDER_LOOPER_STATE_REGISTERED |
                                      BINDER_LOOPER_STATE_ENTERED))
-                                        binder_enqueue_work_ilocked(
-                                                &death->work,
-                                                &thread->todo);
+                                        binder_enqueue_thread_work_ilocked(
+                                                thread,
+                                                &death->work);
                                 else {
                                         binder_enqueue_work_ilocked(
                                                 &death->work,
@@ -3828,8 +3882,8 @@ int binder_thread_write(struct binder_proc *proc,
                                 if (thread->looper &
                                     (BINDER_LOOPER_STATE_REGISTERED |
                                      BINDER_LOOPER_STATE_ENTERED))
-                                        binder_enqueue_work_ilocked(
-                                                &death->work, &thread->todo);
+                                        binder_enqueue_thread_work_ilocked(
+                                                thread, &death->work);
                                 else {
                                         binder_enqueue_work_ilocked(
                                                 &death->work,
@@ -4003,6 +4057,8 @@ retry:
                         break;
                 }
                 w = binder_dequeue_work_head_ilocked(list);
+                if (binder_worklist_empty_ilocked(&thread->todo))
+                        thread->process_todo = false;
 
                 switch (w->type) {
                 case BINDER_WORK_TRANSACTION: {
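
For readers skimming the diff, the stand-alone C program below is a minimal
model of the queueing policy this patch introduces. It is not driver code:
toy_thread, toy_work, enqueue_nowake(), enqueue() and has_work() are invented
stand-ins for struct binder_thread, struct binder_work,
binder_enqueue_thread_work_ilocked_nowake(), binder_enqueue_thread_work_ilocked()
and binder_has_work_ilocked(), with locking, wakeups and list bookkeeping
omitted.

/* Toy model of the process_todo gating; NOT driver code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_work {
        const char *desc;
        struct toy_work *next;
};

struct toy_thread {
        struct toy_work *todo;   /* stand-in for thread->todo */
        bool process_todo;       /* stand-in for the new flag */
};

/* Queue work without marking the todo list as ready (the "nowake" case). */
static void enqueue_nowake(struct toy_thread *t, struct toy_work *w)
{
        w->next = t->todo;
        t->todo = w;
}

/* Queue work and mark the todo list as ready to be processed. */
static void enqueue(struct toy_thread *t, struct toy_work *w)
{
        enqueue_nowake(t, w);
        t->process_todo = true;
}

/* Mirrors the binder_has_work_ilocked() change: keyed off the flag,
 * not off list emptiness. */
static bool has_work(const struct toy_thread *t)
{
        return t->process_todo;
}

int main(void)
{
        struct toy_thread caller = { NULL, false };
        struct toy_work tcomplete = { "BR_TRANSACTION_COMPLETE", NULL };
        struct toy_work reply = { "BR_REPLY", NULL };

        /* Synchronous send: the completion is queued, but the caller is
         * not considered runnable for it - it keeps waiting. */
        enqueue_nowake(&caller, &tcomplete);
        printf("after tcomplete: has_work=%d\n", has_work(&caller)); /* 0 */

        /* Reply arrives: the thread now has work and drains both items
         * (BR_TRANSACTION_COMPLETE and BR_REPLY) in one read. */
        enqueue(&caller, &reply);
        printf("after reply:     has_work=%d\n", has_work(&caller)); /* 1 */

        return 0;
}

In the driver itself, binder_thread_read() clears process_todo once
thread->todo has been drained (the last hunk above), so a later "nowake"
enqueue again leaves the thread asleep until work that sets the flag arrives.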