sched: rt: Refactor idle cpu and recessive cpu selection
author Youngtae Lee <yt0729.lee@samsung.com>
Thu, 9 Aug 2018 02:11:05 +0000 (11:11 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:24:57 +0000 (20:24 +0300)
Implement init code for sysfs and DT to support multiple min_thr
thresholds for detecting heavy RT tasks.
Change the loop and conditions of find_idle_cpu()/find_recessive_cpu().

Change-Id: I4ca5525c4275115c74f9c1577a8f716275526b9a
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
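As a rough illustration of the "multiple min_thr" idea mentioned above (the sysfs/DT init code itself is not part of the hunk shown below), one threshold per coregroup could be used to classify a heavy RT task. The array layout and the is_heavy_rt_task() helper in this user-space sketch are purely hypothetical stand-ins:

/*
 * Hypothetical model only: per-coregroup min_thr values and
 * is_heavy_rt_task() are invented for illustration.  In the actual
 * patch these thresholds would come from DT and be tunable via sysfs.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_COREGROUPS 2

/* one detection threshold per coregroup, e.g. little/big */
static unsigned long min_thr[NR_COREGROUPS] = { 250, 500 };

static bool is_heavy_rt_task(unsigned long task_util, int coregroup)
{
	return task_util >= min_thr[coregroup];
}

int main(void)
{
	printf("util 300 on little: %d\n", is_heavy_rt_task(300, 0)); /* 1: heavy */
	printf("util 300 on big:    %d\n", is_heavy_rt_task(300, 1)); /* 0: not heavy */
	return 0;
}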
kernel/sched/rt.c
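The diff below rewrites both searches from a do/while keyed on a scratch cpu variable into a while loop that drains a candidate_cpus mask: each pass inspects one coregroup, removes the visited CPUs from the mask, then advances prefer_cpu past the group, wrapping at NR_CPUS. A minimal stand-alone sketch of that pattern, under an invented CPU topology and made-up load numbers, might look like this (helper names such as coregroup_first() and find_idle_cpu_model() are not from the patch; the kernel code uses cpumask primitives, idle_cpu(), frt_cpu_util_wake() and an RT-priority tie-break instead):

/*
 * Stand-alone model (plain C, not kernel code) of the group-hopping
 * search.  NR_CPUS, the two-cluster layout, the idle/load tables and
 * every helper name here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* two coregroups: CPUs 0-3 (little) and CPUs 4-7 (big) */
static int coregroup_first(int cpu)  { return cpu < 4 ? 0 : 4; }
static int coregroup_weight(int cpu) { (void)cpu; return 4; }

static bool idle[NR_CPUS] = { false, true, false, false, true, false, false, false };
static unsigned long long load[NR_CPUS] = { 300, 120, 280, 260, 90, 400, 380, 350 };

static int find_idle_cpu_model(int prefer_cpu)
{
	unsigned long long min_load = ~0ULL;
	bool candidate[NR_CPUS];
	int best_cpu = -1, cpu, remaining = NR_CPUS;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		candidate[cpu] = true;

	/* Keep walking coregroups until every CPU has been visited. */
	while (remaining > 0) {
		int first = coregroup_first(prefer_cpu);
		int weight = coregroup_weight(prefer_cpu);

		for (cpu = first; cpu < first + weight; cpu++) {
			/* Clearing visited CPUs is what makes the outer loop terminate. */
			if (candidate[cpu]) {
				candidate[cpu] = false;
				remaining--;
			}

			if (!idle[cpu])
				continue;

			if (load[cpu] < min_load) {
				min_load = load[cpu];
				best_cpu = cpu;
			}
		}

		if (best_cpu >= 0)
			return best_cpu;

		/* Advance to the next coregroup, wrapping around at NR_CPUS. */
		prefer_cpu = first + weight;
		if (prefer_cpu >= NR_CPUS)
			prefer_cpu = 0;
	}

	return best_cpu;
}

int main(void)
{
	printf("starting from cpu 0 -> best idle cpu %d\n", find_idle_cpu_model(0));
	printf("starting from cpu 5 -> best idle cpu %d\n", find_idle_cpu_model(5));
	return 0;
}

Clearing every visited CPU from the candidate set is what guarantees the outer loop terminates even when no suitable CPU is found.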

index 06949c569bfaaf7ca0a4d31ac543d076f26f7e8e..c4e4fbbf079f1d5aa84a9354c1be5efc18aa19e0 100644 (file)
@@ -2557,31 +2557,28 @@ static int find_victim_rt_rq(struct task_struct *task, const struct cpumask *sg_
 
 static int find_idle_cpu(struct task_struct *task)
 {
-       int prefer_cpu, cpu, best_cpu = -1;
-       int cpu_prio, max_prio = -1;
-       u64 cpu_load, min_load = ULLONG_MAX;
+       int best_cpu = -1, cpu, cpu_prio, max_prio = -1, prefer_cpu;
+       u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
        struct cpumask candidate_cpus;
 
-       prefer_cpu = cpu = frt_find_prefer_cpu(task);
        cpumask_and(&candidate_cpus, &task->cpus_allowed, cpu_active_mask);
+       prefer_cpu = frt_find_prefer_cpu(task);
 
-       do {
-               const struct cpumask *grp_mask = cpu_coregroup_mask(cpu);
+       while (!cpumask_empty(&candidate_cpus)) {
+               const struct cpumask *grp_mask = cpu_coregroup_mask(prefer_cpu);
+
+               for_each_cpu(cpu, grp_mask) {
+                       cpumask_clear_cpu(cpu, &candidate_cpus);
 
-               for_each_cpu_and(cpu, grp_mask, &candidate_cpus) {
                        if (!idle_cpu(cpu))
                                continue;
-
                        cpu_prio = cpu_rq(cpu)->rt.highest_prio.curr;
                        if (cpu_prio < max_prio)
                                continue;
 
                        cpu_load = frt_cpu_util_wake(cpu, task) + task_util(task);
-                       if (cpu_load > capacity_orig_of(cpu))
-                               continue;
-
                        if ((cpu_prio > max_prio) || (cpu_load < min_load) ||
-                               (cpu_load == min_load && task_cpu(task) == cpu)) {
+                                       (cpu_load == min_load && task_cpu(task) == cpu)) {
                                min_load = cpu_load;
                                max_prio = cpu_prio;
                                best_cpu = cpu;
@@ -2597,20 +2594,20 @@ static int find_idle_cpu(struct task_struct *task)
                 * If heavy util rt task, search higher performance sched group.
                 * In the opposite case, search lower performance sched group
                 */
-               cpu = cpumask_first(grp_mask);
-               cpu += cpumask_weight(grp_mask);
-               if (cpu >= cpumask_weight(cpu_possible_mask))
-                       cpu = 0;
-       } while (prefer_cpu == cpu);
+               prefer_cpu = cpumask_first(grp_mask);
+               prefer_cpu += cpumask_weight(grp_mask);
+               if (prefer_cpu >= NR_CPUS)
+                       prefer_cpu = 0;
+       }
 
        return best_cpu;
 }
 
 static int find_recessive_cpu(struct task_struct *task)
 {
-       int cpu, prefer_cpu, best_cpu = -1;
-       u64 cpu_load, min_load = ULLONG_MAX;
+       int best_cpu = -1, cpu, prefer_cpu;
        struct cpumask *lowest_mask;
+       u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
        struct cpumask candidate_cpus;
        lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
        /* Make sure the mask is initialized first */
@@ -2622,36 +2619,38 @@ static int find_recessive_cpu(struct task_struct *task)
        cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
 
        cpumask_and(&candidate_cpus, &task->cpus_allowed, lowest_mask);
-       cpumask_and(&candidate_cpus, &candidate_cpus, cpu_active_mask);
-       prefer_cpu = cpu = frt_find_prefer_cpu(task);
+       prefer_cpu = frt_find_prefer_cpu(task);
 
-       do {
-               const struct cpumask *grp_mask = cpu_coregroup_mask(cpu);
+       while (!cpumask_empty(&candidate_cpus)) {
+               const struct cpumask *grp_mask = cpu_coregroup_mask(prefer_cpu);
 
-               for_each_cpu_and(cpu, grp_mask, &candidate_cpus) {
+               for_each_cpu(cpu, grp_mask) {
+                       cpumask_clear_cpu(cpu, &candidate_cpus);
                        cpu_load = frt_cpu_util_wake(cpu, task) + task_util(task);
 
-                       if (cpu_load > capacity_orig_of(cpu))
-                               continue;
-
                        if (cpu_load < min_load ||
-                               (cpu_load == min_load && task_cpu(task) == cpu)) {
+                               (cpu_load == min_load && cpu == prefer_cpu)) {
                                min_load = cpu_load;
                                best_cpu = cpu;
                        }
                }
-               if (cpu_selected(best_cpu))
+
+               if (cpu_selected(best_cpu) &&
+                       ((capacity_orig_of(best_cpu) >= min_load) || (best_cpu == prefer_cpu))) {
                        trace_sched_fluid_stat(task, &task->rt.avg, best_cpu,
                                rt_task(cpu_rq(best_cpu)->curr) ? "RT-RECESS" : "FAIR-RECESS");
+                       return best_cpu;
+               }
+
                /*
                 * If heavy util rt task, search higher performance sched group.
                 * In the opposite case, search lower performance sched group
                 */
-               cpu = cpumask_first(grp_mask);
-               cpu += cpumask_weight(grp_mask);
-               if (cpu >= cpumask_weight(cpu_possible_mask))
-                       cpu = 0;
-       } while (prefer_cpu == cpu);
+               prefer_cpu = cpumask_first(grp_mask);
+               prefer_cpu += cpumask_weight(grp_mask);
+               if (prefer_cpu >= NR_CPUS)
+                       prefer_cpu = 0;
+       }
 
        return best_cpu;
 }
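One behavioural change in find_recessive_cpu() is worth calling out: the capacity check that previously skipped overloaded CPUs inside the inner loop now gates the return instead, so a coregroup's best CPU is only accepted when the projected load fits its original capacity or it is the preferred CPU; otherwise the search moves on to the next (typically higher-performance) group. A tiny user-space sketch of that acceptance test, with invented capacity values and an accept_best_cpu() wrapper standing in for the kernel's cpu_selected()/capacity_orig_of() helpers:

/*
 * Model of the acceptance test only; the capacity table and helper
 * names are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* invented per-CPU original capacities: little cluster 0-3, big cluster 4-7 */
static unsigned long long capacity_orig[NR_CPUS] = {
	430, 430, 430, 430, 1024, 1024, 1024, 1024
};

static bool cpu_selected(int cpu) { return cpu >= 0; }

/*
 * Accept a coregroup's best CPU only if the projected load fits its
 * original capacity or it already is the preferred CPU; otherwise the
 * caller keeps searching the next coregroup.
 */
static bool accept_best_cpu(int best_cpu, int prefer_cpu, unsigned long long min_load)
{
	return cpu_selected(best_cpu) &&
	       (capacity_orig[best_cpu] >= min_load || best_cpu == prefer_cpu);
}

int main(void)
{
	printf("load 600 on little cpu 1: %d\n", accept_best_cpu(1, 0, 600)); /* 0: keep searching */
	printf("load 600 on big cpu 5:    %d\n", accept_best_cpu(5, 4, 600)); /* 1: accept */
	return 0;
}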