[COMMON] sched/rt: add the sync flag into RT
author Johnlay Park <jonglae.park@samsung.com>
	Tue, 10 Apr 2018 02:22:36 +0000 (11:22 +0900)
committer Chungwoo Park <cww.park@samsung.com>
	Mon, 21 May 2018 08:35:49 +0000 (17:35 +0900)
Add a sync flag to struct sched_rt_entity so that a task which
find_lowest_rq_fluid() has placed on its cache-hot preferred CPU can
keep that placement. find_lock_lowest_rq() now lets such a task
through even when the target rq's current task has the same priority,
and task_woken_rt() pushes the woken task away only when the current
task's priority is strictly higher, clearing the flag once the wakeup
has settled.

Change-Id: I0bea23664b351a60b4aea3306be3d87d31efca3a
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
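
In outline, the patch threads one bit of wakeup-scoped state through
three hooks: the flag is set when the fluid placement picks the
cache-hot CPU, read when the push path decides whether an
equal-priority target CPU is acceptable, and cleared when the wakeup
finishes. A minimal userspace sketch of that lifecycle (the struct and
helper names below are illustrative stand-ins, not kernel API):

	#include <stdio.h>

	/* Stand-in for the new field in struct sched_rt_entity. */
	struct rt_entity { int sync_flag; };

	/* find_lowest_rq_fluid(): mark a wakeup that won the cache-hot CPU. */
	static void placed_cache_hot(struct rt_entity *rt)
	{
		rt->sync_flag = 1;
	}

	/* find_lock_lowest_rq(): with the flag up, an equal-priority target
	 * rq is acceptable; a lower ->prio value means higher priority. */
	static int target_ok(const struct rt_entity *rt, int rq_prio, int task_prio)
	{
		if (rt->sync_flag && rq_prio == task_prio)
			return 1;
		return rq_prio > task_prio;
	}

	/* task_woken_rt(): the flag never outlives the wakeup. */
	static void wakeup_done(struct rt_entity *rt)
	{
		rt->sync_flag = 0;
	}

	int main(void)
	{
		struct rt_entity rt = { 0 };

		placed_cache_hot(&rt);
		printf("equal-prio target ok: %d\n", target_ok(&rt, 50, 50)); /* 1 */
		wakeup_done(&rt);
		printf("equal-prio target ok: %d\n", target_ok(&rt, 50, 50)); /* 0 */
		return 0;
	}
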
include/linux/sched.h
kernel/sched/rt.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d11d7455566320451567183a796337e07b60502..d5ae5f9867d9945a19c84a85b5ff3cd5ad21ca69 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -492,6 +492,9 @@ struct sched_rt_entity {
 #endif
 
 #ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       int sync_flag;
+#endif
        /*
         * Per entity load average tracking.
         *
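
The field sits in struct sched_rt_entity under both CONFIG_SMP and
CONFIG_SCHED_USE_FLUID_RT, so it travels with the task and is reached
as p->rt.sync_flag. Hypothetical inline accessors like the ones below
(not part of the patch) would keep the #ifdef blocks out of the rt.c
call sites that follow; the patch instead open-codes the access, so
each call site carries its own guard:

	#ifdef CONFIG_SCHED_USE_FLUID_RT
	static inline void rt_set_sync_flag(struct sched_rt_entity *rt, int v)
	{
		rt->sync_flag = v;
	}
	static inline int rt_get_sync_flag(struct sched_rt_entity *rt)
	{
		return rt->sync_flag;
	}
	#else
	static inline void rt_set_sync_flag(struct sched_rt_entity *rt, int v) { }
	static inline int rt_get_sync_flag(struct sched_rt_entity *rt) { return 0; }
	#endif
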
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0c2f3851ec23e167111affaa41bdb714ccac2a30..630bc6a7a7407cb6c95416f5b00e08a5dfc5ca66 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2335,6 +2335,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
         */
        if ((wake_flags || affordable_cpu(prefer_cpu, task_util(task))) &&
                cpumask_test_cpu(prefer_cpu, cpu_online_mask)) {
+               task->rt.sync_flag = 1;
                best_cpu = prefer_cpu;
                trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "CACHE-HOT");
                goto out;
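
The set side above is the cache-hot fast path: the preferred (waker's)
CPU is taken when the wakeup is sync or when that CPU can absorb the
task's utilization, and only then is sync_flag raised. A userspace
model of that decision (the helpers and the utilization threshold are
stand-ins for the kernel's affordable_cpu()/task_util()):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the kernel helpers used in the hunk above. */
	static bool cpu_is_online(int cpu)                      { return cpu >= 0; }
	static bool affordable_cpu(int cpu, unsigned long util) { return util < 400; }

	/* Take the cache-hot CPU and raise the flag, or report no pick (-1). */
	static int pick_prefer_cpu(int prefer_cpu, int wake_flags,
				   unsigned long task_util, int *sync_flag)
	{
		if ((wake_flags || affordable_cpu(prefer_cpu, task_util)) &&
		    cpu_is_online(prefer_cpu)) {
			*sync_flag = 1;	/* placement was cache-hot */
			return prefer_cpu;
		}
		return -1;		/* fall back to the wider search */
	}

	int main(void)
	{
		int sync = 0;

		/* Sync wakeup: the waker's CPU wins regardless of load. */
		printf("cpu=%d sync=%d\n", pick_prefer_cpu(2, 1, 900, &sync), sync);

		/* Non-sync wakeup, CPU already full: no pick, flag stays 0. */
		sync = 0;
		printf("cpu=%d sync=%d\n", pick_prefer_cpu(2, 0, 900, &sync), sync);
		return 0;
	}
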
@@ -2538,16 +2539,26 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                lowest_rq = cpu_rq(cpu);
-
-               if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+               if (task->rt.sync_flag == 1 && lowest_rq->rt.highest_prio.curr == task->prio) {
+                       /*
+                        * If the sync flag is set, let the task go ahead
+                        * even though its priority is the same as that of
+                        * the current task on the target rq.
+                        */
+                       trace_sched_fluid_stat(task, &task->se.avg, cpu, "SYNC AGAIN");
+               } else if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+#else
+               if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+#endif
                        /*
                         * Target rq has tasks of equal or higher priority,
                         * retrying does not release any lock and is unlikely
                         * to yield a positive result.
                         */
                        lowest_rq = NULL;
                        break;
                }

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
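
The patched check has three outcomes instead of two. A self-contained
decision table (semantics mirrored from the hunk above; kernel ->prio
compares inversely, so a lower value means higher priority):

	#include <assert.h>

	enum outcome { BAIL, SYNC_ACCEPT, TRY_LOCK };

	/* One iteration of the patched test in find_lock_lowest_rq(). */
	static enum outcome check(int sync_flag, int rq_highest_prio, int task_prio)
	{
		if (sync_flag && rq_highest_prio == task_prio)
			return SYNC_ACCEPT;	/* trace "SYNC AGAIN", then lock the rq */
		if (rq_highest_prio <= task_prio)
			return BAIL;		/* lowest_rq = NULL; break; */
		return TRY_LOCK;		/* fall through to double_lock_balance() */
	}

	int main(void)
	{
		assert(check(0, 50, 50) == BAIL);	 /* equal prio, no sync: old behavior */
		assert(check(1, 50, 50) == SYNC_ACCEPT); /* equal prio + sync: the new case */
		assert(check(1, 40, 50) == BAIL);	 /* target already runs higher prio */
		assert(check(1, 90, 50) == TRY_LOCK);	 /* target strictly lower prio */
		return 0;
	}
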
@@ -2978,8 +2989,19 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
            p->nr_cpus_allowed > 1 &&
            (dl_task(rq->curr) || rt_task(rq->curr)) &&
            (rq->curr->nr_cpus_allowed < 2 ||
-            rq->curr->prio <= p->prio))
+            rq->curr->prio <= p->prio)) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+               if (p->rt.sync_flag && rq->curr->prio < p->prio) {
+                       p->rt.sync_flag = 0;
+                       push_rt_tasks(rq);
+               }
+#else
                push_rt_tasks(rq);
+#endif
+       }
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       p->rt.sync_flag = 0;
+#endif
 }
 
 /* Assumes rq->lock is held */
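
With the flag present, the wake-time push becomes a one-shot decision:
push only when the sync placement lost to a strictly higher-priority
current task, and drop the flag either way. A userspace model of the
FLUID branch of task_woken_rt() (names simplified, the other guard
conditions reduced to the priority test):

	#include <stdio.h>

	struct task_model { int prio; int sync_flag; };

	/* 'curr_prio' is the priority of the task currently running on
	 * the woken task's runqueue; lower value means higher priority. */
	static int maybe_push(struct task_model *p, int curr_prio)
	{
		int pushed = 0;

		if (p->sync_flag && curr_prio < p->prio) {
			/* the cache-hot CPU runs strictly more important
			 * work: give up the sync placement and push away */
			p->sync_flag = 0;
			pushed = 1;
		}
		p->sync_flag = 0;	/* the flag never survives the wakeup */
		return pushed;
	}

	int main(void)
	{
		struct task_model p = { .prio = 50, .sync_flag = 1 };

		printf("pushed=%d\n", maybe_push(&p, 40)); /* 1: curr wins, push away */
		p.sync_flag = 1;
		printf("pushed=%d\n", maybe_push(&p, 50)); /* 0: equal prio, stay cache-hot */
		return 0;
	}
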