/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "cpufreq_governor.h"
#include "cpu_load_metric.h"

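/*
 * Pick the sysfs attribute group matching how this platform exposes governor
 * tunables: one group per policy, or a single system-wide group.
 */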
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

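/*
 * Sample the load of every CPU belonging to @cpu's policy, remember the
 * busiest one, and hand that maximum to the governor's gov_check_cpu()
 * callback, which decides whether the frequency should change.
 */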
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND)
		ignore_nice = od_tuners->ignore_nice_load;
	else
		ignore_nice = cs_tuners->ignore_nice_load;

	policy = cdbs->cur_policy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					j_cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

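		/* Load: share of this window's wall time spent non-idle */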
		load = 100 * (wall_time - idle_time) / wall_time;

		if (load > max_load)
			max_load = load;

		update_cpu_metric(j, cur_wall_time, idle_time, wall_time, policy);
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}

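/*
 * (Re)arm the governor's deferrable work: with @all_cpus set, the work is
 * queued on every CPU in the policy, otherwise only on the calling CPU.
 */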
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	if (!policy->governor_enabled)
		return;

	if (!all_cpus) {
		__gov_queue_work(smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}
}
EXPORT_SYMBOL_GPL(gov_queue_work);

static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_common_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->work);
	}
}

/* Return true if the cpu load needs to be evaluated again */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);
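
/*
 * Illustrative sketch (not part of this file): a governor's deferred work
 * handler typically pairs need_load_eval() with dbs_check_cpu() and then
 * requeues itself. Names such as my_dbs_timer are hypothetical; the real
 * handlers live in cpufreq_ondemand.c and cpufreq_conservative.c.
 *
 *	static void my_dbs_timer(struct work_struct *work)
 *	{
 *		struct cpu_dbs_common_info *cdbs = container_of(work,
 *				struct cpu_dbs_common_info, work.work);
 *		struct dbs_data *dbs_data = cdbs->cur_policy->governor_data;
 *		unsigned int sampling_rate;	(read from the tuners)
 *		bool modify_all = true;
 *
 *		mutex_lock(&cdbs->timer_mutex);
 *		if (!need_load_eval(cdbs, sampling_rate))
 *			modify_all = false;	(sampled too recently)
 *		else
 *			dbs_check_cpu(dbs_data, cdbs->cpu);
 *		gov_queue_work(dbs_data, cdbs->cur_policy,
 *			       delay_for_sampling_rate(sampling_rate),
 *			       modify_all);
 *		mutex_unlock(&cdbs->timer_mutex);
 *	}
 */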

static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

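/*
 * Common entry point for the ondemand and conservative governors, dispatching
 * on @event: POLICY_INIT/EXIT allocate and free the shared tunables and sysfs
 * group, START/STOP arm and cancel the per-CPU sampling work, and LIMITS
 * clamps the current frequency to the updated policy bounds.
 */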
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
	struct od_ops *od_ops = NULL;
	struct od_dbs_tuners *od_tuners = NULL;
	struct cs_dbs_tuners *cs_tuners = NULL;
	struct cpu_dbs_common_info *cpu_cdbs;
	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;
	int rc;

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(dbs_data);
		} else if (dbs_data) {
			dbs_data->usage_count++;
			policy->governor_data = dbs_data;
			return 0;
		}

		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
		if (!dbs_data) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		dbs_data->cdata = cdata;
		dbs_data->usage_count = 1;
		rc = cdata->init(dbs_data);
		if (rc) {
			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
			kfree(dbs_data);
			return rc;
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr(dbs_data));
		if (rc) {
			cdata->exit(dbs_data);
			kfree(dbs_data);
			return rc;
		}

		policy->governor_data = dbs_data;

		/* policy latency is in ns. Convert it to us first */
		latency = policy->cpuinfo.transition_latency / 1000;
		if (latency == 0)
			latency = 1;

		/* Bring kernel and HW constraints together */
		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);
		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));
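		/*
		 * Worked example: transition_latency = 500000 ns gives
		 * latency = 500 us, so the default sampling rate becomes
		 * max(min_sampling_rate, 500 * LATENCY_MULTIPLIER) us, with
		 * min_sampling_rate itself raised to at least
		 * MIN_LATENCY_MULTIPLIER * 500 us.
		 */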

		if ((cdata->governor == GOV_CONSERVATIVE) &&
				(!policy->governor->initialized)) {
			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

			cpufreq_register_notifier(cs_ops->notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (!have_governor_per_policy())
			cdata->gdbs_data = dbs_data;

		return 0;
	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--dbs_data->usage_count) {
			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr(dbs_data));

			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
				(policy->governor->initialized == 1)) {
				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

				cpufreq_unregister_notifier(cs_ops->notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
			}

			cdata->exit(dbs_data);
			kfree(dbs_data);
			cdata->gdbs_data = NULL;
		}

		policy->governor_data = NULL;
		return 0;
	}

	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		cs_tuners = dbs_data->tuners;
		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		od_tuners = dbs_data->tuners;
		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		od_ops = dbs_data->cdata->gov_ops;
		io_busy = od_tuners->io_is_busy;
	}

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!policy->cur)
			return -EINVAL;

		mutex_lock(&dbs_data->mutex);

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_common_info *j_cdbs =
				dbs_data->cdata->get_cpu_cdbs(j);

			j_cdbs->cpu = j;
			j_cdbs->cur_policy = policy;
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					&j_cdbs->prev_cpu_wall, io_busy);
			if (ignore_nice)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			mutex_init(&j_cdbs->timer_mutex);
			INIT_DEFERRABLE_WORK(&j_cdbs->work,
					dbs_data->cdata->gov_dbs_timer);
		}

		/*
		 * conservative does not implement micro-accounting like the
		 * ondemand governor does, thus we are bound to jiffies/HZ
		 */
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
			cs_dbs_info->down_skip = 0;
			cs_dbs_info->enable = 1;
			cs_dbs_info->requested_freq = policy->cur;
		} else {
			od_dbs_info->rate_mult = 1;
			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
			od_ops->powersave_bias_init_cpu(cpu);
		}

		mutex_unlock(&dbs_data->mutex);

		/* Initialize the timer time stamp */
		cpu_cdbs->time_stamp = ktime_get();

		gov_queue_work(dbs_data, policy,
				delay_for_sampling_rate(sampling_rate), true);
		break;

	case CPUFREQ_GOV_STOP:
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
			cs_dbs_info->enable = 0;

		gov_cancel_work(dbs_data, policy);

		mutex_lock(&dbs_data->mutex);
		mutex_destroy(&cpu_cdbs->timer_mutex);
		cpu_cdbs->cur_policy = NULL;

		mutex_unlock(&dbs_data->mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&cpu_cdbs->timer_mutex);
		if (policy->max < cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(dbs_data, cpu);
		mutex_unlock(&cpu_cdbs->timer_mutex);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
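
/*
 * Illustrative sketch (hypothetical, not part of this file): a governor
 * plugs into this common code by forwarding its ->governor() callback to
 * cpufreq_governor_dbs() with its own common_dbs_data, roughly:
 *
 *	static int my_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 *					   unsigned int event)
 *	{
 *		return cpufreq_governor_dbs(policy, &my_dbs_cdata, event);
 *	}
 *
 * where my_dbs_cdata supplies the get_cpu_cdbs(), gov_dbs_timer(),
 * gov_check_cpu(), init() and exit() hooks used above; see
 * cpufreq_ondemand.c and cpufreq_conservative.c for the real instances.
 */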