sched: rt: Fix cpu_util_wake() to account for rt util
author Youngtae Lee <yt0729.lee@samsung.com>
Fri, 6 Jul 2018 01:46:58 +0000 (10:46 +0900)
committer hskang <hs1218.kang@samsung.com>
Sun, 9 Sep 2018 21:36:57 +0000 (06:36 +0900)
The previous cpu_util_wake() considers only CFS util, so it makes
wrong placement decisions while an RT task is running. Introduce
frt_cpu_util_wake(), which accounts for RT util as well as CFS util.
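
For illustration only (not part of the patch), a minimal user-space
sketch of the effect. The struct, helpers and numbers below are
hypothetical; they only model the two estimates:

  #include <stdio.h>

  /* Toy model of one CPU's load-tracking signals (made-up values). */
  struct cpu_state {
          unsigned long cfs_util; /* utilization of fair (CFS) tasks */
          unsigned long rt_util;  /* utilization of RT tasks */
          unsigned long cap;      /* original capacity of the CPU */
  };

  /* Old estimate: only the CFS contribution is visible. */
  static unsigned long util_cfs_only(const struct cpu_state *c)
  {
          return c->cfs_util;
  }

  /* New estimate: CFS plus RT, clamped to the CPU's capacity. */
  static unsigned long util_cfs_rt(const struct cpu_state *c)
  {
          unsigned long util = c->cfs_util + c->rt_util;

          return util < c->cap ? util : c->cap;
  }

  int main(void)
  {
          /* A CPU kept busy by an RT task, with little CFS activity. */
          struct cpu_state c = { .cfs_util = 100, .rt_util = 600, .cap = 1024 };

          printf("cfs only: %lu\n", util_cfs_only(&c)); /* 100: looks idle */
          printf("cfs + rt: %lu\n", util_cfs_rt(&c));   /* 700: mostly busy */
          return 0;
  }

With CFS-only util the CPU above looks nearly idle (100 of 1024)
although an RT task keeps it about 70% busy, which is exactly the
misjudgment frt_cpu_util_wake() avoids.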

Change-Id: I814cd8346b7e6260fa3cbefff773024bd74a42a1
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
kernel/sched/rt.c

index 453c2ed3f48a05359b36162f385f92cad0139031..82827d27be4086e298c0931dfed874447433746f 100644
@@ -2223,8 +2223,37 @@ static inline int affordable_cpu(int cpu, unsigned long task_load)
        return 1;
 }
 
-extern unsigned long cpu_util_wake(int cpu, struct task_struct *p);
 extern unsigned long task_util(struct task_struct *p);
+
+unsigned long frt_cpu_util_wake(int cpu, struct task_struct *p)
+{
+       struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+       struct rt_rq *rt_rq = &cpu_rq(cpu)->rt;
+       unsigned int util;
+
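+       /* Sum the CFS and RT contributions to this CPU's utilization. */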
+       util = READ_ONCE(cfs_rq->avg.util_avg) + READ_ONCE(rt_rq->avg.util_avg);
+
+#ifdef CONFIG_SCHED_WALT
+       /*
+        * WALT does not decay idle tasks in the same manner
+        * as PELT, so it makes little sense to subtract task
+        * utilization from cpu utilization. Instead just use
+        * cpu_util for this case.
+        */
+       if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+               return cpu_util(cpu);
+#endif
+       /* Task has no contribution or is new */
+       if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+               return util;
+
+       /* Discount task's blocked util from CPU's util */
+       util -= min_t(unsigned int, util, task_util(p));
+
+       return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
+
 static inline int cpu_selected(int cpu)        { return (nr_cpu_ids > cpu && cpu >= 0); }
 /*
  * Must find the victim or recessive (not in lowest_mask)
@@ -2362,7 +2388,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
 
                for_each_cpu_and(icpu, rttsk_cpus_allowed(task), traversingDom) {
                        if (idle_cpu(icpu)) {
-                               cpu_load = cpu_util_wake(icpu, task) + task_util(task);
+                               cpu_load = frt_cpu_util_wake(icpu, task) + task_util(task);
                                if ((min_icl > cpu_load) ||
                                        (min_icl == cpu_load && task_cpu(task) == icpu)) {
                                        min_icl = cpu_load;
@@ -2390,7 +2416,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
                        if (!cpumask_test_cpu(icpu, lowest_mask))
                                continue;
 
-                       cpu_load = cpu_util_wake(icpu, task) + task_util(task);
+                       cpu_load = frt_cpu_util_wake(icpu, task) + task_util(task);
 
                        if (rt_task(cpu_rq(icpu)->curr)) {
                                if (cpu_load < min_rt_load ||