sched: schedutil: remove the sugov_update_single() function
authorYoungtae Lee <yt0729.lee@samsung.com>
Thu, 26 Apr 2018 12:27:22 +0000 (21:27 +0900)
committerlakkyung.jung <lakkyung.jung@samsung.com>
Mon, 23 Jul 2018 05:59:21 +0000 (14:59 +0900)
Change-Id: I870028b8b159501e79730f226e8c46c0c2bff50f
Signed-off-by: Youngtae Lee <yt0729.lee@samsung.com>
kernel/sched/cpufreq_schedutil.c

index a9d4aa51fca043a76c3b22fe3249921e73cf8301..468ec4fc7a3b32d46f423ffb1dd1fa3f32f9ba86 100644 (file)
@@ -327,59 +327,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
        }
 }
 
-#ifdef CONFIG_NO_HZ_COMMON
-static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
-{
-       unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
-       bool ret = idle_calls == sg_cpu->saved_idle_calls;
-
-       sg_cpu->saved_idle_calls = idle_calls;
-       return ret;
-}
-#else
-static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
-#endif /* CONFIG_NO_HZ_COMMON */
-
-static void sugov_update_single(struct update_util_data *hook, u64 time,
-                               unsigned int flags)
-{
-       struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
-       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-       struct cpufreq_policy *policy = sg_policy->policy;
-       unsigned long util, max;
-       unsigned int next_f;
-       bool busy;
-
-       sugov_set_iowait_boost(sg_cpu, time, flags);
-       sg_cpu->last_update = time;
-
-       if (!sugov_should_update_freq(sg_policy, time))
-               return;
-
-       busy = sugov_cpu_is_busy(sg_cpu);
-
-       if (flags & SCHED_CPUFREQ_DL) {
-               next_f = policy->cpuinfo.max_freq;
-       } else {
-               sugov_get_util(&util, &max, sg_cpu->cpu);
-               sugov_iowait_boost(sg_cpu, &util, &max);
-               next_f = get_next_freq(sg_policy, util, max);
-               /*
-                * Do not reduce the frequency if the CPU has not been idle
-                * recently, as the reduction is likely to be premature then.
-                */
-               if (busy && next_f < sg_policy->next_freq &&
-                   sg_policy->next_freq != UINT_MAX) {
-                       next_f = sg_policy->next_freq;
-
-                       /* Reset cached freq as next_freq has changed */
-                       sg_policy->cached_raw_freq = 0;
-               }
-       }
-
-       sugov_update_commit(sg_policy, time, next_f);
-}
-
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -775,9 +722,7 @@ static int sugov_start(struct cpufreq_policy *policy)
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                            policy_is_shared(policy) ?
-                                                       sugov_update_shared :
-                                                       sugov_update_single);
+                                                       sugov_update_shared);
        }
        return 0;
 }