From 4bc8a960edcc4661d9e25af8af6d33a354fb33ae Mon Sep 17 00:00:00 2001
From: Youngtae Lee
Date: Fri, 6 Jul 2018 10:46:58 +0900
Subject: [PATCH] sched: rt: Fix cpu_util_wake problem

The previous cpu_util_wake() considered only the cfs utilization.
While an rt task is running, this leads to wrong placement decisions,
so introduce frt_cpu_util_wake(), which accounts for the rt class
utilization in addition to the cfs utilization.

The "task has no contribution or is new" early return is clamped to
capacity_orig_of() just like the normal path, so the function never
reports more utilization than the CPU can provide, and the accumulator
is kept as unsigned long to match the return types of task_util() and
the util_avg fields.

Change-Id: I814cd8346b7e6260fa3cbefff773024bd74a42a1
Signed-off-by: Youngtae Lee
---
 kernel/sched/rt.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 453c2ed3f48a..82827d27be40 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2223,8 +2223,34 @@ static inline int affordable_cpu(int cpu, unsigned long task_load)
 	return 1;
 }
 
-extern unsigned long cpu_util_wake(int cpu, struct task_struct *p);
 extern unsigned long task_util(struct task_struct *p);
+unsigned long frt_cpu_util_wake(int cpu, struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	struct rt_rq *rt_rq = &cpu_rq(cpu)->rt;
+	unsigned long util;
+
+	util = READ_ONCE(cfs_rq->avg.util_avg) + READ_ONCE(rt_rq->avg.util_avg);
+
+#ifdef CONFIG_SCHED_WALT
+	/*
+	 * WALT does not decay idle tasks in the same manner
+	 * as PELT, so it makes little sense to subtract task
+	 * utilization from cpu utilization. Instead just use
+	 * cpu_util for this case.
+	 */
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		return cpu_util(cpu);
+#endif
+	/* Task has no contribution or is new */
+	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+		return min_t(unsigned long, util, capacity_orig_of(cpu));
+
+	/* Discount task's blocked util from CPU's util */
+	util -= min_t(unsigned long, util, task_util(p));
+
+	return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
 static inline int cpu_selected(int cpu) { return (nr_cpu_ids > cpu && cpu >= 0); }
 /*
  * Must find the victim or recessive (not in lowest_mask)
@@ -2362,7 +2388,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
 
 	for_each_cpu_and(icpu, rttsk_cpus_allowed(task), traversingDom) {
 		if (idle_cpu(icpu)) {
-			cpu_load = cpu_util_wake(icpu, task) + task_util(task);
+			cpu_load = frt_cpu_util_wake(icpu, task) + task_util(task);
 			if ((min_icl > cpu_load) ||
 			    (min_icl == cpu_load && task_cpu(task) == icpu)) {
 				min_icl = cpu_load;
@@ -2390,7 +2416,7 @@ static int find_lowest_rq_fluid(struct task_struct *task, int wake_flags)
 		if (!cpumask_test_cpu(icpu, lowest_mask))
 			continue;
 
-		cpu_load = cpu_util_wake(icpu, task) + task_util(task);
+		cpu_load = frt_cpu_util_wake(icpu, task) + task_util(task);
 
 		if (rt_task(cpu_rq(icpu)->curr)) {
 			if (cpu_load < min_rt_load ||
-- 
2.20.1