From bce03ad52c18ff704de53467839f46108e46ac0f Mon Sep 17 00:00:00 2001
From: Johnlay Park
Date: Tue, 10 Apr 2018 11:22:36 +0900
Subject: [PATCH] [COMMON] sched/rt: add the sync flag into RT to fulfill the
 cache hot benefit

Change-Id: I0bea23664b351a60b4aea3306be3d87d31efca3a
Signed-off-by: Johnlay Park
---
 include/linux/sched.h |  3 +++
 kernel/sched/rt.c     | 28 +++++++++++++++++++++++++---
 2 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d11d7455566..d5ae5f9867d9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -492,6 +492,9 @@ struct sched_rt_entity {
 #endif
 
 #ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+	int sync_flag;
+#endif
 	/*
 	 * Per entity load average tracking.
 	 *
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0c2f3851ec23..630bc6a7a740 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2335,6 +2335,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
 	 */
 	if ((wake_flags || affordable_cpu(prefer_cpu, task_util(task))) &&
 		cpumask_test_cpu(prefer_cpu, cpu_online_mask)) {
+		task->rt.sync_flag = 1;
 		best_cpu = prefer_cpu;
 		trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "CACHE-HOT");
 		goto out;
@@ -2538,8 +2539,17 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		lowest_rq = cpu_rq(cpu);
-
-		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+		if (task->rt.sync_flag == 1 && lowest_rq->rt.highest_prio.curr == task->prio) {
+			/*
+			 * If the sync flag is set,
+			 * let the task go even though its priority is the same as the current task's.
+			 */
+			trace_sched_fluid_stat(task, &task->se.avg, cpu, "SYNC AGAIN");
+		} else
+#else
+		if (lowest_rq->rt.highest_prio.curr <= task->prio)
+		{
 			/*
 			 * Target rq has tasks of equal or higher priority,
 			 * retrying does not release any lock and is unlikely
@@ -2548,6 +2558,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			lowest_rq = NULL;
 			break;
 		}
+#endif
 
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
@@ -2978,8 +2989,19 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->nr_cpus_allowed > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
-	     rq->curr->prio <= p->prio))
+	     rq->curr->prio <= p->prio)) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+		if (p->rt.sync_flag && rq->curr->prio < p->prio) {
+			p->rt.sync_flag = 0;
+			push_rt_tasks(rq);
+		}
+#else
 		push_rt_tasks(rq);
+#endif
+	}
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+	p->rt.sync_flag = 0;
+#endif
 }
 
 /* Assumes rq->lock is held */
-- 
2.20.1