[COMMON] sched/rt: fix the problem of long stay
authorJohnlay Park <jonglae.park@samsung.com>
Mon, 9 Apr 2018 13:21:47 +0000 (22:21 +0900)
committerChungwoo Park <cww.park@samsung.com>
Mon, 21 May 2018 08:35:48 +0000 (17:35 +0900)
in a faster CPU domain.

Change-Id: Icc0c64a945efff9bf33753224ca16c9086e9b7d5
Signed-off-by: Johnlay Park <jonglae.park@samsung.com>
kernel/sched/rt.c

index 0da4b9f4b6d26ba7a1560d8befb39f433db52919..0c2f3851ec23e167111affaa41bdb714ccac2a30 100644 (file)
@@ -1541,6 +1541,25 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
        rcu_read_lock();
        curr = READ_ONCE(rq->curr); /* unlocked access */
 
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       if (curr) {
+               int target = find_lowest_rq(p, flags);
+               /*
+                * Even though the destination CPU is running
+                * a higher priority task, FluidRT may still bother moving p there
+                * when its utilization is very small, and the other CPUs are too
+                * busy to accommodate p in terms of priority and utilization.
+                *
+                * BTW, if curr has a higher priority than p, FluidRT tries the
+                * other CPUs first. In the worst case, curr can be the victim if
+                * it has very small utilization.
+                */
+               if (likely(target != -1)) {
+                       cpu = target;
+               }
+       }
+#else
+
        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
@@ -1566,22 +1585,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio)) {
-#ifdef CONFIG_SCHED_USE_FLUID_RT
-               int target = find_lowest_rq(p, flags);
-               /*
-                * Even though the destination CPU is running
-                * a higher priority task, FluidRT can bother moving it
-                * when its utilization is very small, and the other CPU is too busy
-                * to accomodate the p in the point of priority and utilization.
-                *
-                * BTW, if the curr has higher priority than p, FluidRT tries to find
-                * the other CPUs first. In the worst case, curr can be victim, if it
-                * has very small utilization.
-                */
-               if (likely(target != -1)) {
-                       cpu = target;
-               }
-#else
                int target = find_lowest_rq(p);
                /*
                 * Don't bother moving it if the destination CPU is
@@ -1590,11 +1593,15 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
                if (target != -1 &&
                    p->prio < cpu_rq(target)->rt.highest_prio.curr)
                        cpu = target;
-#endif
        }
+#endif
        rcu_read_unlock();
 
 out:
+#ifdef CONFIG_SCHED_USE_FLUID_RT
+       if (cpu >= 6)
+               trace_sched_fluid_stat(p, &p->se.avg, cpu, "BIG_ASSIGED");
+#endif
        return cpu;
 }
 
@@ -2299,11 +2306,15 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
        int min_cpu = -1, min_rt_cpu = -1;
 
        /* Make sure the mask is initialized first */
-       if (unlikely(!lowest_mask))
+       if (unlikely(!lowest_mask)) {
+               trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "NA LOWESTMSK");
                goto out;
+       }
 
-       if (task->nr_cpus_allowed == 1)
+       if (task->nr_cpus_allowed == 1) {
+               trace_sched_fluid_stat(task, &task->se.avg, best_cpu, "NA ALLOWED");
                goto out; /* No other targets possible */
+       }
 
        /* update the per-cpu local_cpu_mask (lowest_mask) */
        cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
@@ -2419,8 +2430,10 @@ unlock:
        rcu_read_unlock();
 out:
 
-       if (!cpumask_test_cpu(best_cpu, cpu_online_mask))
+       if (!cpumask_test_cpu(best_cpu, cpu_online_mask)) {
+               trace_sched_fluid_stat(task, &task->se.avg, cpu, "NOTHING_VALID");
                best_cpu = -1;
+       }
 
        return best_cpu;
 }