sched: frt: Fix loop condition to use domain pointer
author Youngtae Lee <yt0729.lee@samsung.com>
Fri, 7 Sep 2018 05:20:14 +0000 (14:20 +0900)
committer hskang <hs1218.kang@samsung.com>
Wed, 10 Oct 2018 12:59:56 +0000 (21:59 +0900)
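
Link the frt domains into a circular list through a new 'next' pointer
when init_frt() builds them, and make find_idle_cpu() and
find_recessive_cpu() walk that list starting from the preferred domain
instead of deriving the next search group from coregroup masks and
NR_CPUS wrap-around. Bail out early when no frt domain is registered
for the preferred CPU, and additionally restrict recessive candidates
to cpu_active_mask.
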
Change-Id: Ie4646c26f21438c7bbaad84ff70ad1056e0f0912
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
kernel/sched/rt.c

index 6ace73d45f23987bbe8f5e3b321d04417a40c422..4100b37204f36119de7784bb080d7a92792ddda4 100644 (file)
@@ -27,6 +27,7 @@ struct frt_dom {
        struct cpumask          *activated_cpus;
 
        struct list_head        list;
+       struct frt_dom          *next;
        /* kobject for sysfs group */
        struct kobject          kobj;
 };
@@ -286,7 +287,7 @@ disable:
 
 static int __init init_frt(void)
 {
-       struct frt_dom *dom;
+       struct frt_dom *dom, *prev = NULL, *head = NULL;
        struct device_node *dn;
        int cpu, tcpu, cnt = 0;
        struct cpumask *ptr_mask;
@@ -304,19 +305,27 @@ static int __init init_frt(void)
                        continue;
 
                dom = kzalloc(sizeof(struct frt_dom), GFP_KERNEL);
+               if (cpu == 0)
+                       head = dom;
+
                dom->activated_cpus = ptr_mask;
 
                cpumask_copy(&dom->cpus, cpu_coregroup_mask(cpu));
 
                frt_parse_dt(dn, dom, cnt++);
 
+               dom->next = head;
+               if (prev)
+                       prev->next = dom;
+               prev = dom;
+
                for_each_cpu(tcpu, &dom->cpus)
                        per_cpu(frt_rqs, tcpu) = dom;
+
                frt_set_coverage_ratio(cpu);
                frt_set_active_ratio(cpu);
 
                list_add_tail(&dom->list, &frt_list);
-
        }
 
        frt_sysfs_init();
@@ -2631,23 +2640,24 @@ static int find_victim_rt_rq(struct task_struct *task, const struct cpumask *sg_
 
 static int find_idle_cpu(struct task_struct *task)
 {
-       int best_cpu = -1, cpu, cpu_prio, max_prio = -1, prefer_cpu;
-       u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
+       int cpu, best_cpu = -1;
+       int cpu_prio, max_prio = -1;
+       u64 cpu_load, min_load = ULLONG_MAX;
        struct cpumask candidate_cpus;
+       struct frt_dom *dom, *prefer_dom;
+
+       cpu = frt_find_prefer_cpu(task);
+       prefer_dom = dom = per_cpu(frt_rqs, cpu);
+       if (unlikely(!dom))
+               return best_cpu;
 
-       prefer_cpu = frt_find_prefer_cpu(task);
        cpumask_and(&candidate_cpus, &task->cpus_allowed, cpu_active_mask);
        cpumask_and(&candidate_cpus, &candidate_cpus, get_activated_cpus());
-
        if (unlikely(cpumask_empty(&candidate_cpus)))
                cpumask_copy(&candidate_cpus, &task->cpus_allowed);
 
-       while (!cpumask_empty(&candidate_cpus)) {
-               const struct cpumask* grp_mask = cpu_coregroup_mask(prefer_cpu);
-
-               for_each_cpu(cpu, grp_mask) {
-                       cpumask_clear_cpu(cpu, &candidate_cpus);
-
+       do {
+               for_each_cpu_and(cpu, &dom->cpus, &candidate_cpus) {
                        if (!idle_cpu(cpu))
                                continue;
                        cpu_prio = cpu_rq(cpu)->rt.highest_prio.curr;
@@ -2668,25 +2678,20 @@ static int find_idle_cpu(struct task_struct *task)
                        return best_cpu;
                }
 
-               /*
-                * If heavy util rt task, search higher performance sched group.
-                * In the opposite case, search lower performance sched group
-                */
-               prefer_cpu = cpumask_first(grp_mask);
-               prefer_cpu += cpumask_weight(grp_mask);
-               if (prefer_cpu >= NR_CPUS)
-                       prefer_cpu = 0;
-       }
+               dom = dom->next;
+       } while (dom != prefer_dom);
 
        return best_cpu;
 }
 
 static int find_recessive_cpu(struct task_struct *task)
 {
-       int best_cpu = -1, cpu, prefer_cpu;
+       int cpu, best_cpu = -1;
        struct cpumask *lowest_mask;
        u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
        struct cpumask candidate_cpus;
+       struct frt_dom *dom, *prefer_dom;
+
        lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask)) {
@@ -2697,38 +2702,29 @@ static int find_recessive_cpu(struct task_struct *task)
        cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
 
        cpumask_and(&candidate_cpus, &task->cpus_allowed, lowest_mask);
-       prefer_cpu = frt_find_prefer_cpu(task);
-
-       while (!cpumask_empty(&candidate_cpus)) {
-               const struct cpumask* grp_mask = cpu_coregroup_mask(prefer_cpu);
+       cpumask_and(&candidate_cpus, &candidate_cpus, cpu_active_mask);
+       cpu = frt_find_prefer_cpu(task);
+       prefer_dom = dom = per_cpu(frt_rqs, cpu);
+       if (unlikely(!dom))
+               return best_cpu;
 
-               for_each_cpu(cpu, grp_mask) {
-                       cpumask_clear_cpu(cpu, &candidate_cpus);
+       do {
+               for_each_cpu_and(cpu, &dom->cpus, &candidate_cpus) {
                        cpu_load = frt_cpu_util_wake(cpu, task) + task_util(task);
 
                        if (cpu_load < min_load ||
-                               (cpu_load == min_load && cpu == prefer_cpu)) {
+                               (cpu_load == min_load && task_cpu(task) == cpu)) {
                                min_load = cpu_load;
                                best_cpu = cpu;
                        }
                }
 
-               if (cpu_selected(best_cpu) &&
-                       ((capacity_orig_of(best_cpu) >= min_load) || (best_cpu == prefer_cpu))) {
+               if (cpu_selected(best_cpu))
                        trace_sched_fluid_stat(task, &task->rt.avg, best_cpu,
                                rt_task(cpu_rq(best_cpu)->curr) ? "RT-RECESS" : "FAIR-RECESS");
-                       return best_cpu;
-               }
 
-               /*
-                * If heavy util rt task, search higher performance sched group.
-                * In the opposite case, search lower performance sched group
-                */
-               prefer_cpu = cpumask_first(grp_mask);
-               prefer_cpu += cpumask_weight(grp_mask);
-               if (prefer_cpu >= NR_CPUS)
-                       prefer_cpu = 0;
-       }
+               dom = dom->next;
+       } while (dom != prefer_dom);
 
        return best_cpu;
 }
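
For reference, a minimal user-space sketch of the circular domain list this
patch builds in init_frt() and walks in find_idle_cpu()/find_recessive_cpu().
The struct dom, build_domains() and walk_from() names and the 4+2+2 CPU
layout are illustrative stand-ins, not the kernel code itself:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct frt_dom: one entry per coregroup. */
struct dom {
        int first_cpu;          /* first CPU of the coregroup */
        int nr_cpus;            /* number of CPUs in the coregroup */
        struct dom *next;       /* circular link, as added by this patch */
};

/*
 * Build the circular list the way init_frt() does: every new domain points
 * back at the head, and the previous domain is re-pointed at the new one,
 * so the last domain ends up pointing at the head.
 */
static struct dom *build_domains(int groups[][2], int n)
{
        struct dom *head = NULL, *prev = NULL;

        for (int i = 0; i < n; i++) {
                struct dom *d = calloc(1, sizeof(*d));

                if (!d)
                        exit(1);
                d->first_cpu = groups[i][0];
                d->nr_cpus = groups[i][1];

                if (!head)
                        head = d;
                d->next = head;
                if (prev)
                        prev->next = d;
                prev = d;
        }
        return head;
}

/*
 * Visit every domain exactly once, starting from the preferred one,
 * mirroring the do/while loops in find_idle_cpu()/find_recessive_cpu().
 */
static void walk_from(struct dom *prefer)
{
        struct dom *d = prefer;

        do {
                printf("domain: cpus %d-%d\n",
                       d->first_cpu, d->first_cpu + d->nr_cpus - 1);
                d = d->next;
        } while (d != prefer);
}

int main(void)
{
        int groups[][2] = { { 0, 4 }, { 4, 2 }, { 6, 2 } };     /* 4+2+2 CPUs */
        struct dom *head = build_domains(groups, 3);

        walk_from(head->next);  /* start the search from the second domain */
        return 0;
}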