ANDROID: Fixup 64/32-bit divide confusion for WALT configs
Author:     Chris Redpath <chris.redpath@arm.com>
AuthorDate: Mon, 12 Feb 2018 17:41:12 +0000
Committer:  Todd Kjos <tkjos@google.com>
CommitDate: Wed, 14 Feb 2018 16:28:51 +0000
Consolidate the open-coded WALT utilization calculations behind a single
walt_util() macro that keeps the dividend in a u64 and divides with
do_div(), so 32-bit builds do not emit an unsupported 64-bit division.

Builds cleanly for aarch64 and arm, with and without
CONFIG_FAIR_GROUP_SCHED.

Bug: 72707388
Change-Id: Iafd2b8e2b1fb13837b760e3821610d67bfaa48aa
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
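
For background: with a plain '/', dividing a u64 on 32-bit arm compiles to a
call to a libgcc helper (__aeabi_uldivmod) that the kernel does not provide,
so 64-bit dividends have to go through do_div(), which divides a u64 by a u32
in place. A minimal userspace sketch of the pattern the new macro captures
(the do_div() stand-in and the sample values are illustrative assumptions,
not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's do_div(): divides the 64-bit
     * dividend in place by a 32-bit divisor, evaluating to the remainder. */
    #define do_div(n, base) ({              \
            uint32_t __rem = (n) % (base);  \
            (n) /= (base);                  \
            __rem;                          \
    })

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
            uint64_t demand_sum = 5000000;        /* ns of demand in the window */
            uint32_t walt_ravg_window = 20000000; /* 20 ms window, in ns */
            unsigned long util;

            /* Same shape as the patch's walt_util() macro. */
            uint64_t sum = demand_sum << SCHED_CAPACITY_SHIFT;
            do_div(sum, walt_ravg_window);
            util = (unsigned long)sum;

            printf("util = %lu\n", util); /* 5000000 * 1024 / 20000000 = 256 */
            return 0;
    }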
 include/trace/events/sched.h
 kernel/sched/sched.h

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c620ae91cdd3c6505bc9596df6b514e840c5a4b4..7909097a1e5edf35b95064d323e747fc072bd52c 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -696,6 +696,12 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int walt_ravg_window;
 extern bool walt_disabled;
+
+#define walt_util(util_var, demand_sum) {\
+       u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+       do_div(sum, walt_ravg_window);\
+       util_var = (typeof(util_var))sum;\
+       }
 #endif
 
 /*
@@ -722,11 +728,9 @@ TRACE_EVENT(sched_load_avg_cpu,
                 __entry->util_avg_pelt  = cfs_rq->avg.util_avg;
                 __entry->util_avg_walt  = 0;
 #ifdef CONFIG_SCHED_WALT
-                __entry->util_avg_walt  =
-                                cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-                do_div(__entry->util_avg_walt, walt_ravg_window);
+                walt_util(__entry->util_avg_walt, cpu_rq(cpu)->prev_runnable_sum);
                 if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
-                        __entry->util_avg               = __entry->util_avg_walt;
+                        __entry->util_avg = __entry->util_avg_walt;
 #endif
         ),
 
@@ -775,8 +779,7 @@ TRACE_EVENT(sched_load_se,
 #ifdef CONFIG_SCHED_WALT
                if (!se->my_q) {
                        struct task_struct *p = container_of(se, struct task_struct, se);
-                       __entry->util_walt = p->ravg.demand;
-                       do_div(__entry->util_walt, walt_ravg_window >> SCHED_CAPACITY_SHIFT);
+                       walt_util(__entry->util_walt, p->ravg.demand);
                        if (!walt_disabled && sysctl_sched_use_walt_task_util)
                                __entry->util = __entry->util_walt;
                }
@@ -1113,7 +1116,7 @@ TRACE_EVENT(walt_update_history,
                __entry->samples        = samples;
                __entry->evt            = evt;
                __entry->demand         = p->ravg.demand;
-               __entry->walt_avg = (__entry->demand << 10) / walt_ravg_window,
+               walt_util(__entry->walt_avg,__entry->demand);
                __entry->pelt_avg       = p->se.avg.util_avg;
                memcpy(__entry->hist, p->ravg.sum_history,
                                        RAVG_HIST_SIZE_MAX * sizeof(u32));
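
Worth noting in the sched_load_se hunk above: the removed code divided the
raw demand by a right-shifted window, which truncates the divisor (and would
divide by zero if walt_ravg_window were ever below 1024); the macro shifts
the dividend instead. A standalone sketch with made-up values where the two
forms disagree (plain '/' stands in for do_div(), which is fine in userspace):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    int main(void)
    {
            /* Pathological example values, chosen so the results differ. */
            uint32_t walt_ravg_window = 1048575;  /* 2^20 - 1 ns */
            uint64_t demand = 1048575;            /* task ran the whole window */

            /* Removed form: divide by a truncated, right-shifted window. */
            uint64_t old_util = demand / (walt_ravg_window >> SCHED_CAPACITY_SHIFT);

            /* walt_util() form: shift the dividend, divide by the full window. */
            uint64_t new_util = (demand << SCHED_CAPACITY_SHIFT) / walt_ravg_window;

            /* Prints: old = 1025, new = 1024 -- the old form can overshoot
             * SCHED_CAPACITY_SCALE because the divisor lost its low bits. */
            printf("old = %llu, new = %llu\n",
                   (unsigned long long)old_util,
                   (unsigned long long)new_util);
            return 0;
    }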
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5b96242b31119ce41c25c39ff0d5cf028114efd7..65a3ec10c579f287c187023d7c85c386863c7896 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1753,6 +1753,13 @@ extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int walt_ravg_window;
 extern bool walt_disabled;
 
+#ifdef CONFIG_SCHED_WALT
+#define walt_util(util_var, demand_sum) {\
+       u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+       do_div(sum, walt_ravg_window);\
+       util_var = (typeof(util_var))sum;\
+       }
+#endif
 /*
  * cpu_util returns the amount of capacity of a CPU that is used by CFS
  * tasks. The unit of the return value must be the one of capacity so we can
@@ -1786,8 +1793,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
 
 #ifdef CONFIG_SCHED_WALT
        if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-               util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_CAPACITY_SHIFT;
-               util = div_u64(util, walt_ravg_window);
+               walt_util(util, cpu_rq(cpu)->cumulative_runnable_avg);
        }
 #endif
        delta += util;
@@ -1809,8 +1815,7 @@ static inline unsigned long cpu_util_freq(int cpu)
 
 #ifdef CONFIG_SCHED_WALT
        if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-               util = cpu_rq(cpu)->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
-               do_div(util, walt_ravg_window);
+               walt_util(util, cpu_rq(cpu)->prev_runnable_sum);
        }
 #endif
        return (util >= capacity) ? capacity : util;
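
As a worked example of the converted cpu_util_freq() path, including the
final clamp against CPU capacity (all numbers below are invented for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10

    /* Standalone sketch of the converted cpu_util_freq() arithmetic. */
    int main(void)
    {
            uint64_t prev_runnable_sum = 15000000; /* ns of demand last window */
            uint32_t walt_ravg_window  = 20000000; /* 20 ms window, in ns */
            unsigned long capacity     = 1024;     /* CPU capacity at max freq */

            uint64_t util = (prev_runnable_sum << SCHED_CAPACITY_SHIFT)
                            / walt_ravg_window;    /* 15360000000 / 20000000 = 768 */

            unsigned long clamped = (util >= capacity)
                                    ? capacity : (unsigned long)util;

            printf("util = %llu, clamped = %lu\n",
                   (unsigned long long)util, clamped);
            return 0;
    }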