From 9492c8d32a0758d72ced9a764f0fdaeee2cc15a9 Mon Sep 17 00:00:00 2001
From: Youngtae Lee
Date: Thu, 9 Aug 2018 11:11:05 +0900
Subject: [PATCH] sched: rt: Refactor idle cpu and recessive cpu selection

Implement the sysfs and device-tree init code needed to support
multiple min_thr thresholds for detecting heavy RT tasks, and rework
the search loop and selection conditions in find_idle_cpu() and
find_recessive_cpu().

Change-Id: I4ca5525c4275115c74f9c1577a8f716275526b9a
Signed-off-by: Youngtae Lee
---
 kernel/sched/rt.c | 67 +++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 34 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d32fb44c4cc9..e44faa106edc 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2541,31 +2541,28 @@ static int find_victim_rt_rq(struct task_struct *task, const struct cpumask *sg_
 static int find_idle_cpu(struct task_struct *task)
 {
-	int prefer_cpu, cpu, best_cpu = -1;
-	int cpu_prio, max_prio = -1;
-	u64 cpu_load, min_load = ULLONG_MAX;
+	int best_cpu = -1, cpu, cpu_prio, max_prio = -1, prefer_cpu;
+	u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
 	struct cpumask candidate_cpus;
 
-	prefer_cpu = cpu = frt_find_prefer_cpu(task);
 	cpumask_and(&candidate_cpus, &task->cpus_allowed, cpu_active_mask);
+	prefer_cpu = frt_find_prefer_cpu(task);
 
-	do {
-		const struct cpumask *grp_mask = cpu_coregroup_mask(cpu);
+	while (!cpumask_empty(&candidate_cpus)) {
+		const struct cpumask *grp_mask = cpu_coregroup_mask(prefer_cpu);
+
+		for_each_cpu(cpu, grp_mask) {
+			cpumask_clear_cpu(cpu, &candidate_cpus);
 
-		for_each_cpu_and(cpu, grp_mask, &candidate_cpus) {
 			if (!idle_cpu(cpu))
 				continue;
-
 			cpu_prio = cpu_rq(cpu)->rt.highest_prio.curr;
 			if (cpu_prio < max_prio)
 				continue;
 
 			cpu_load = frt_cpu_util_wake(cpu, task) + task_util(task);
-			if (cpu_load > capacity_orig_of(cpu))
-				continue;
-
 			if ((cpu_prio > max_prio) || (cpu_load < min_load) ||
-			    (cpu_load == min_load && task_cpu(task) == cpu)) {
+				(cpu_load == min_load && task_cpu(task) == cpu)) {
 				min_load = cpu_load;
 				max_prio = cpu_prio;
 				best_cpu = cpu;
@@ -2581,20 +2578,20 @@ static int find_idle_cpu(struct task_struct *task)
 	 * If heavy util rt task, search higher performance sched group.
 	 * In the opposite case, search lower performance sched group
 	 */
-		cpu = cpumask_first(grp_mask);
-		cpu += cpumask_weight(grp_mask);
-		if (cpu >= cpumask_weight(cpu_possible_mask))
-			cpu = 0;
-	} while (prefer_cpu == cpu);
+		prefer_cpu = cpumask_first(grp_mask);
+		prefer_cpu += cpumask_weight(grp_mask);
+		if (prefer_cpu >= NR_CPUS)
+			prefer_cpu = 0;
+	}
 
 	return best_cpu;
 }
 
 static int find_recessive_cpu(struct task_struct *task)
 {
-	int cpu, prefer_cpu, best_cpu = -1;
-	u64 cpu_load, min_load = ULLONG_MAX;
+	int best_cpu = -1, cpu, prefer_cpu;
 	struct cpumask *lowest_mask;
+	u64 cpu_load = ULLONG_MAX, min_load = ULLONG_MAX;
 	struct cpumask candidate_cpus;
 
 	lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	/* Make sure the mask is initialized first */
@@ -2606,36 +2603,38 @@ static int find_recessive_cpu(struct task_struct *task)
 	cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);
 	cpumask_and(&candidate_cpus, &task->cpus_allowed, lowest_mask);
-	cpumask_and(&candidate_cpus, &candidate_cpus, cpu_active_mask);
-	prefer_cpu = cpu = frt_find_prefer_cpu(task);
+	prefer_cpu = frt_find_prefer_cpu(task);
 
-	do {
-		const struct cpumask *grp_mask = cpu_coregroup_mask(cpu);
+	while (!cpumask_empty(&candidate_cpus)) {
+		const struct cpumask *grp_mask = cpu_coregroup_mask(prefer_cpu);
 
-		for_each_cpu_and(cpu, grp_mask, &candidate_cpus) {
+		for_each_cpu(cpu, grp_mask) {
+			cpumask_clear_cpu(cpu, &candidate_cpus);
 			cpu_load = frt_cpu_util_wake(cpu, task) + task_util(task);
-			if (cpu_load > capacity_orig_of(cpu))
-				continue;
-
 			if (cpu_load < min_load ||
-			    (cpu_load == min_load && task_cpu(task) == cpu)) {
+				(cpu_load == min_load && cpu == prefer_cpu)) {
 				min_load = cpu_load;
 				best_cpu = cpu;
 			}
 		}
-		if (cpu_selected(best_cpu))
+
+		if (cpu_selected(best_cpu) &&
+			((capacity_orig_of(best_cpu) >= min_load) || (best_cpu == prefer_cpu))) {
 			trace_sched_fluid_stat(task, &task->rt.avg, best_cpu,
 				rt_task(cpu_rq(best_cpu)->curr) ? "RT-RECESS" : "FAIR-RECESS");
+			return best_cpu;
+		}
+
 		/*
 		 * If heavy util rt task, search higher performance sched group.
 		 * In the opposite case, search lower performance sched group
 		 */
-		cpu = cpumask_first(grp_mask);
-		cpu += cpumask_weight(grp_mask);
-		if (cpu >= cpumask_weight(cpu_possible_mask))
-			cpu = 0;
-	} while (prefer_cpu == cpu);
+		prefer_cpu = cpumask_first(grp_mask);
+		prefer_cpu += cpumask_weight(grp_mask);
+		if (prefer_cpu >= NR_CPUS)
+			prefer_cpu = 0;
+	}
 
 	return best_cpu;
 }
-- 
2.20.1
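
Note on the reworked search loop: both find_idle_cpu() and
find_recessive_cpu() now walk the system one coregroup at a time,
starting from the preferred CPU's group, retiring every visited CPU
from candidate_cpus, and wrapping the group cursor at NR_CPUS until no
candidates remain. The standalone C program below simulates that
visiting order. It is a minimal userspace sketch, not kernel code, and
it assumes a hypothetical 8-CPU system with two coregroups (cpus 0-3
and 4-7); coregroup_first()/coregroup_weight(), the candidate[] array,
and the hard-coded prefer_cpu are stand-ins for cpu_coregroup_mask(),
the candidate_cpus cpumask, and frt_find_prefer_cpu() respectively.

/* gcc -o groupwalk groupwalk.c && ./groupwalk */
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical layout: coregroup 0 holds cpus 0-3, coregroup 1 holds 4-7 */
#define NR_CPUS 8

static int coregroup_first(int cpu)  { return cpu < 4 ? 0 : 4; }
static int coregroup_weight(int cpu) { (void)cpu; return 4; }

int main(void)
{
	bool candidate[NR_CPUS] = { true, true, true, true,
				    true, true, true, true };
	int prefer_cpu = 5;	/* stand-in for frt_find_prefer_cpu() */
	int remaining = NR_CPUS;

	/* Mirrors the patch's while (!cpumask_empty(&candidate_cpus)) loop */
	while (remaining > 0) {
		int first = coregroup_first(prefer_cpu);
		int weight = coregroup_weight(prefer_cpu);

		printf("scanning group of cpu%d: cpus %d-%d\n",
		       prefer_cpu, first, first + weight - 1);

		/* for_each_cpu(cpu, grp_mask): visit and retire each group member */
		for (int cpu = first; cpu < first + weight; cpu++) {
			if (candidate[cpu]) {
				candidate[cpu] = false;
				remaining--;
				printf("  visit cpu%d\n", cpu);
			}
		}

		/* Advance to the next group, wrapping at NR_CPUS as the patch does */
		prefer_cpu = first + weight;
		if (prefer_cpu >= NR_CPUS)
			prefer_cpu = 0;
	}
	return 0;
}

Starting from prefer_cpu = 5, the walk scans cpus 4-7 first, then wraps
to 0-3 and stops; each pass clears its group from the candidate set,
which is the property the cpumask_empty() guard relies on to terminate.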
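
Note on the relocated capacity check in find_recessive_cpu(): the old
code rejected each CPU whose projected load exceeded
capacity_orig_of(cpu) inside the scan, while the new code first picks
the lowest-load CPU of the group and only then accepts it, either
because it has enough capacity or because it is the preferred CPU. The
sketch below models that acceptance rule with made-up utilization
numbers; struct cpu_stat and pick_in_group() are illustrative
stand-ins, not symbols from the patch.

/* gcc -o recess recess.c && ./recess */
#include <stdio.h>
#include <stdint.h>

#define GROUP_SIZE 4

/*
 * One coregroup's worth of hypothetical figures: the projected load if
 * the task moved there (frt_cpu_util_wake() + task_util() in the patch)
 * and the cpu's original capacity (capacity_orig_of()).
 */
struct cpu_stat {
	int id;
	uint64_t load;		/* projected load with the task on board */
	uint64_t capacity;	/* capacity_orig_of(cpu) */
};

/*
 * Mirrors the reworked inner scan: pick the lowest-load cpu (ties
 * broken toward prefer_cpu), then accept it only if it can absorb the
 * load, or if it is the preferred cpu anyway.  Returns -1 to tell the
 * caller to move on to the next coregroup.
 */
static int pick_in_group(const struct cpu_stat *grp, int n, int prefer_cpu)
{
	uint64_t min_load = UINT64_MAX;
	int best = -1;

	for (int i = 0; i < n; i++) {
		if (grp[i].load < min_load ||
		    (grp[i].load == min_load && grp[i].id == prefer_cpu)) {
			min_load = grp[i].load;
			best = i;
		}
	}

	/* Deferred capacity check: once per group instead of once per cpu */
	if (best >= 0 &&
	    (grp[best].capacity >= min_load || grp[best].id == prefer_cpu))
		return grp[best].id;

	return -1;
}

int main(void)
{
	/* An overloaded LITTLE group: every projected load exceeds capacity */
	struct cpu_stat little[GROUP_SIZE] = {
		{ 0, 520, 430 }, { 1, 610, 430 }, { 2, 480, 430 }, { 3, 700, 430 },
	};

	printf("LITTLE pick: %d\n", pick_in_group(little, GROUP_SIZE, 5));
	return 0;
}

With every LITTLE CPU over capacity the function returns -1, which in
the patch corresponds to falling through to the next coregroup instead
of settling for an overloaded CPU in the preferred group.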