From b4002becaef1fd6a7768c3a7300ba319f407999c Mon Sep 17 00:00:00 2001
From: Chris Redpath
Date: Mon, 12 Feb 2018 17:41:12 +0000
Subject: [PATCH] ANDROID: Fixup 64/32-bit divide confusion for WALT configs

Builds cleanly for aarch64 and arm with and without
CONFIG_FAIR_GROUP_SCHED.

Bug: 72707388
Change-Id: Iafd2b8e2b1fb13837b760e3821610d67bfaa48aa
Signed-off-by: Chris Redpath
---
 include/trace/events/sched.h | 17 ++++++++++-------
 kernel/sched/sched.h         | 13 +++++++++----
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c620ae91cdd3..7909097a1e5e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -696,6 +696,12 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int walt_ravg_window;
 extern bool walt_disabled;
+
+#define walt_util(util_var, demand_sum) {\
+		u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+		do_div(sum, walt_ravg_window);\
+		util_var = (typeof(util_var))sum;\
+	}
 #endif
 
 /*
@@ -722,11 +728,9 @@ TRACE_EVENT(sched_load_avg_cpu,
 		__entry->util_avg_pelt = cfs_rq->avg.util_avg;
 		__entry->util_avg_walt = 0;
 #ifdef CONFIG_SCHED_WALT
-		__entry->util_avg_walt =
-			cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-		do_div(__entry->util_avg_walt, walt_ravg_window);
+		walt_util(__entry->util_avg_walt, cpu_rq(cpu)->prev_runnable_sum);
 		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
-			__entry->util_avg	= __entry->util_avg_walt;
+			__entry->util_avg = __entry->util_avg_walt;
 #endif
 	),
 
@@ -775,8 +779,7 @@ TRACE_EVENT(sched_load_se,
 #ifdef CONFIG_SCHED_WALT
 		if (!se->my_q) {
 			struct task_struct *p = container_of(se, struct task_struct, se);
-			__entry->util_walt = p->ravg.demand;
-			do_div(__entry->util_walt, walt_ravg_window >> SCHED_CAPACITY_SHIFT);
+			walt_util(__entry->util_walt, p->ravg.demand);
 			if (!walt_disabled && sysctl_sched_use_walt_task_util)
 				__entry->util = __entry->util_walt;
 		}
@@ -1113,7 +1116,7 @@ TRACE_EVENT(walt_update_history,
 		__entry->samples = samples;
 		__entry->evt = evt;
 		__entry->demand = p->ravg.demand;
-		__entry->walt_avg = (__entry->demand << 10) / walt_ravg_window,
+		walt_util(__entry->walt_avg,__entry->demand);
 		__entry->pelt_avg = p->se.avg.util_avg;
 		memcpy(__entry->hist, p->ravg.sum_history,
 					RAVG_HIST_SIZE_MAX * sizeof(u32));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b96242b3111..65a3ec10c579 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1753,6 +1753,13 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int walt_ravg_window;
 extern bool walt_disabled;
 
+#ifdef CONFIG_SCHED_WALT
+#define walt_util(util_var, demand_sum) {\
+		u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+		do_div(sum, walt_ravg_window);\
+		util_var = (typeof(util_var))sum;\
+	}
+#endif
 /*
  * cpu_util returns the amount of capacity of a CPU that is used by CFS
  * tasks. The unit of the return value must be the one of capacity so we can
@@ -1786,8 +1793,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_CAPACITY_SHIFT;
-		util = div_u64(util, walt_ravg_window);
+		walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
 	}
 #endif
 	delta += util;
@@ -1809,8 +1815,7 @@ static inline unsigned long cpu_util_freq(int cpu)
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-		do_div(util, walt_ravg_window);
+		walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
 	}
 #endif
 	return (util >= capacity) ? capacity : util;
-- 
2.20.1
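
For context beyond the diff itself: walt_util() centralizes a 64-by-32 divide. A demand or runnable sum tracked in nanoseconds is scaled up by SCHED_CAPACITY_SHIFT (10) and divided by the WALT window length. The open-coded variants being removed mixed plain '/', do_div() and div_u64(); on 32-bit arm a plain u64-by-u32 divide is lowered by gcc to a libgcc helper (__aeabi_uldivmod) that the kernel does not link, which is presumably the build breakage behind the subject line. The userspace sketch below only illustrates the arithmetic; the 20ms window and 10ms demand values, and the names in it, are assumptions for the example, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t demand = 10000000;	/* 10ms of tracked demand, in ns (assumed) */
	uint32_t window = 20000000;	/* stand-in for walt_ravg_window: 20ms window (assumed) */
	uint64_t sum = demand << 10;	/* SCHED_CAPACITY_SHIFT == 10 */

	/*
	 * In the kernel this divide is what do_div() performs in place;
	 * plain C division is fine here in userspace, but in 32-bit
	 * kernel code it would need __aeabi_uldivmod from libgcc.
	 */
	printf("util = %llu\n", (unsigned long long)(sum / window));
	return 0;
}

With those example numbers the result is 512, i.e. half of SCHED_CAPACITY_SCALE (1024), as expected for a task that is runnable for half of the window.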