* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
+ * (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
* Bookkeeping structure for binder nodes.
*/
u8 sched_policy:2;
+ u8 inherit_rt:1;
u8 accept_fds:1;
u8 min_priority;
};
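
The new bit is driven from userspace: a process that publishes a binder node sets FLAT_BINDER_FLAG_INHERIT_RT in the flags of the flat_binder_object it writes, and the node-initialization hunk below copies that bit into node->inherit_rt. A minimal sketch of the userspace side, assuming the Android uapi binder header (the only place FLAT_BINDER_FLAG_INHERIT_RT is defined) and its current flat_binder_object layout; init_node_object() is a hypothetical helper, not part of the patch:

#include <stdbool.h>
#include <linux/android/binder.h>

/* Fill in a node object, optionally opting in to RT inheritance. */
static void init_node_object(struct flat_binder_object *obj,
			     binder_uintptr_t ptr, binder_uintptr_t cookie,
			     bool allow_rt_inheritance)
{
	obj->hdr.type = BINDER_TYPE_BINDER;
	obj->binder = ptr;
	obj->cookie = cookie;
	obj->flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
	if (allow_rt_inheritance)
		obj->flags |= FLAT_BINDER_FLAG_INHERIT_RT;
}
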
static void binder_transaction_priority(struct task_struct *task,
struct binder_transaction *t,
- struct binder_priority node_prio)
+ struct binder_priority node_prio,
+ bool inherit_rt)
{
struct binder_priority desired_prio;
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
- desired_prio.prio = t->priority.prio;
- desired_prio.sched_policy = t->priority.sched_policy;
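+ /*
+  * Unless the node opted in to RT inheritance, an RT caller is demoted
+  * to SCHED_NORMAL at nice 0 for the duration of this transaction;
+  * otherwise the transaction priority is inherited as-is.
+  */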
+ if (!inherit_rt && is_rt_policy(t->priority.sched_policy)) {
+ desired_prio.prio = NICE_TO_PRIO(0);
+ desired_prio.sched_policy = SCHED_NORMAL;
+ } else {
+ desired_prio.prio = t->priority.prio;
+ desired_prio.sched_policy = t->priority.sched_policy;
+ }
if (node_prio.prio < t->priority.prio ||
    (node_prio.prio == t->priority.prio &&
     node_prio.sched_policy == SCHED_FIFO))
	desired_prio = node_prio;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
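
Taken together, the effect of inherit_rt can be modelled as a small pure function. This is a simplified sketch, not the kernel code: the stand-in macros and is_rt_policy() mirror the kernel definitions, lower prio values mean higher priority, and the node-minimum override follows the comparison in binder_transaction_priority() above.

#include <stdbool.h>

/* Stand-ins for the kernel definitions used below (values as in Linux). */
#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2
#define MAX_RT_PRIO	100
#define NICE_TO_PRIO(nice)	((nice) + MAX_RT_PRIO + 20)

struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

/* Model of the priority chosen for the thread servicing a transaction. */
static struct binder_priority
effective_priority(struct binder_priority txn_prio,	/* t->priority */
		   struct binder_priority node_prio,	/* node minimum */
		   bool inherit_rt)
{
	struct binder_priority desired;

	if (!inherit_rt && is_rt_policy(txn_prio.sched_policy)) {
		/* Node refuses RT inheritance: fall back to CFS defaults. */
		desired.sched_policy = SCHED_NORMAL;
		desired.prio = NICE_TO_PRIO(0);
	} else {
		desired = txn_prio;
	}

	/* The node minimum still wins when it asks for a higher priority. */
	if (node_prio.prio < txn_prio.prio ||
	    (node_prio.prio == txn_prio.prio &&
	     node_prio.sched_policy == SCHED_FIFO))
		desired = node_prio;

	return desired;
}

In short: an RT caller is demoted to the default CFS priority unless the node was registered with FLAT_BINDER_FLAG_INHERIT_RT, and in either case the node's minimum priority can still raise the result.
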
if (thread) {
target_list = &thread->todo;
- binder_transaction_priority(thread->task, t, node_prio);
+ binder_transaction_priority(thread->task, t, node_prio,
+ node->inherit_rt);
} else if (!target_list) {
target_list = &proc->todo;
} else {
tr.cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
- binder_transaction_priority(current, t, node_prio);
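+ /*
+  * Apply the transaction priority to the reading thread before it is
+  * handed BR_TRANSACTION, honoring the target node's inherit_rt choice.
+  */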
+ binder_transaction_priority(current, t, node_prio,
+ target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;