[COMMON] sched/rt: add the sync flag into RT
author     Johnlay Park <jonglae.park@samsung.com>
           Tue, 10 Apr 2018 02:22:36 +0000 (11:22 +0900)
committer  Cosmin Tanislav <demonsingur@gmail.com>
           Mon, 22 Apr 2024 17:24:55 +0000 (20:24 +0300)
Add a per-entity sync flag so that a task woken via a sync wakeup
keeps the cache-hot CPU chosen for it: the push path may then accept
a target runqueue of equal priority, and task_woken_rt() will not
push the task away unless the running task strictly outranks it.

Change-Id: I0bea23664b351a60b4aea3306be3d87d31efca3a
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
include/linux/sched.h
kernel/sched/rt.c

include/linux/sched.h
index cdcd8a96f91772918c30bd8c0eeffe56152d65fd..be660e2a6c1d053cf697adf2c87a74f97d6d4601 100644 (file)
@@ -562,6 +562,9 @@ struct sched_rt_entity {
 #endif
 
 #ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       int sync_flag;  /* one-shot hint: task was placed by a sync wakeup */
+#endif
        /*
         * Per entity load average tracking.
         *
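Note that the new field is nested inside the existing CONFIG_SMP block, so it
only exists when both CONFIG_SMP and CONFIG_SCHED_USE_FLUID_RT are enabled;
accordingly, the accesses below are either inside FLUID-RT-only code or
wrapped in the same #ifdef. A sketch of the resulting layout (unrelated
members elided):

        struct sched_rt_entity {
                /* ... run-list and timeout members elided ... */
        #ifdef CONFIG_SMP
        #ifdef CONFIG_SCHED_USE_FLUID_RT
                int sync_flag;  /* one-shot hint: placed by a sync wakeup */
        #endif
                /* ... per-entity load average tracking ... */
        #endif
        };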
kernel/sched/rt.c
index 5d8dfc6e81991b42ad3d9656d4b8654227807a91..a223e4f7249381dd5e6ed62450ccc228a705c680 100644 (file)
@@ -2353,6 +2353,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
         */
        if ((wake_flags || affordable_cpu(prefer_cpu, task_util(task))) &&
                cpumask_test_cpu(prefer_cpu, cpu_online_mask)) {
+               task->rt.sync_flag = 1;
                best_cpu = prefer_cpu;
                trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "CACHE-HOT");
                goto out;
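This hunk is the producer side: when the wakeup is synchronous (any nonzero
wake_flags here), or the preferred CPU can still afford the task's
utilization, and that CPU is online, the task is placed on the cache-hot CPU
and now also tagged with sync_flag so the push path later knows why it was
placed there. A runnable userspace model of just this gate (names are
illustrative, not kernel API):

        #include <stdbool.h>
        #include <stdio.h>

        /* Model of the gate above: a sync wakeup bypasses the capacity
         * check; an offline preferred CPU vetoes the placement. */
        static bool take_prefer_cpu(bool sync_wake, bool affordable, bool online)
        {
                return (sync_wake || affordable) && online;
        }

        int main(void)
        {
                printf("%d\n", take_prefer_cpu(true, false, true));  /* 1: sync wins */
                printf("%d\n", take_prefer_cpu(false, false, true)); /* 0: over capacity */
                return 0;
        }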
@@ -2556,8 +2557,18 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                lowest_rq = cpu_rq(cpu);
-
-               if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+               if (task->rt.sync_flag == 1 && lowest_rq->rt.highest_prio.curr == task->prio) {
+                       /*
+                        * If the sync flag is set, let the task go even
+                        * though its priority is the same as the current task's.
+                        */
+                       trace_sched_fluid_stat(task, &task->se.avg, cpu, "SYNC AGAIN");
+               } else if (lowest_rq->rt.highest_prio.curr <= task->prio)
+#else
+               if (lowest_rq->rt.highest_prio.curr <= task->prio)
+#endif
+               {
                        /*
                         * Target rq has tasks of equal or higher priority,
                         * retrying does not release any lock and is unlikely
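The net effect of the rewritten check, modeled as runnable C so the
tie-breaking rule is explicit (kernel convention: a numerically lower prio
value means higher priority; may_push() is an illustrative name, not a
kernel function):

        #include <stdio.h>

        /* Returns 1 if pushing the task to the candidate rq may proceed. */
        static int may_push(int sync_flag, int rq_highest_prio, int task_prio)
        {
                if (sync_flag && rq_highest_prio == task_prio)
                        return 1;                       /* sync: accept an exact tie */
                return rq_highest_prio > task_prio;     /* stock rule otherwise */
        }

        int main(void)
        {
                printf("tie, sync:    %d\n", may_push(1, 50, 50)); /* 1: pushed */
                printf("tie, no sync: %d\n", may_push(0, 50, 50)); /* 0: blocked */
                printf("lower prio:   %d\n", may_push(0, 60, 50)); /* 1: pushed */
                return 0;
        }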
@@ -2996,8 +3007,19 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
            p->nr_cpus_allowed > 1 &&
            (dl_task(rq->curr) || rt_task(rq->curr)) &&
            (rq->curr->nr_cpus_allowed < 2 ||
-            rq->curr->prio <= p->prio))
+            rq->curr->prio <= p->prio)) {
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+               if (p->rt.sync_flag && rq->curr->prio < p->prio) {
+                       p->rt.sync_flag = 0;
+                       push_rt_tasks(rq);
+               }
+#else
                push_rt_tasks(rq);
+#endif
+       }
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       p->rt.sync_flag = 0;
+#endif
 }
 
 /* Assumes rq->lock is held */
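Putting the consumer side together: with CONFIG_SCHED_USE_FLUID_RT enabled,
the woken-task hook preprocesses to roughly the following (preconditions are
shown only as far as this hunk reveals them; the elided ones are the stock
checks):

        static void task_woken_rt(struct rq *rq, struct task_struct *p)
        {
                if (/* stock push preconditions elided */
                    p->nr_cpus_allowed > 1 &&
                    (dl_task(rq->curr) || rt_task(rq->curr)) &&
                    (rq->curr->nr_cpus_allowed < 2 ||
                     rq->curr->prio <= p->prio)) {
                        /*
                         * Push only when the sync placement lost outright,
                         * i.e. the running task strictly outranks p; an
                         * exact priority tie leaves p on its cache-hot CPU.
                         */
                        if (p->rt.sync_flag && rq->curr->prio < p->prio) {
                                p->rt.sync_flag = 0;
                                push_rt_tasks(rq);
                        }
                }
                /* The hint is one-shot: always clear it before returning. */
                p->rt.sync_flag = 0;
        }

Clearing the flag unconditionally on the way out makes it a one-shot hint, so
a stale value from this wakeup cannot bias a later, unrelated push decision.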