/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licenced under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
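 *
 * As a worked illustration (numbers added here, not part of the original
 * comment): the factor is stored as fixed point scaled by
 * RESOLUTION * DECAY, so a steady measured/expected ratio of 0.5 is kept
 * as roughly 0.5 * 1024 * 8 = 4096, and a 1000 usec next-timer distance
 * is corrected to 1000 * 4096 / (1024 * 8) = 500 usec of predicted idle.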
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
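 *
 * As an illustration (example values, not from the original comment):
 * eight observed intervals of {4000, 3900, 4100, 4000, 4050, 3950, 4000,
 * 4000} usec average to 4000 usec with a standard deviation of about
 * 55 usec, which easily passes the variance test (avg > 6 * stddev), so
 * 4000 usec is used as the prediction even if the next timer is much
 * further away.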
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 * The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to too high a performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
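 *
 * For illustration (example values): with a predicted duration of
 * 300 usec and a multiplier of 10, only C states with an exit latency
 * of 30 usec or less remain candidates.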
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have;
 * a value of 10 is added for each process that is waiting for IO
 * on this CPU.
 * (these values are experimentally determined; note that the load average
 * contribution is currently disabled in performance_multiplier() below
 * because it proved too noisy in practice)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    expected_us;
        u64             predicted_us;
        unsigned int    exit_us;
        unsigned int    bucket;
        u64             correction_factor[BUCKETS];
        u32             intervals[INTERVALS];
        int             interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
        unsigned long this = this_cpu_load();

        return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with
         * IO pending, one without.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowait_cpu(smp_processor_id()))
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}
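
/*
 * Example (illustrative values): an expected duration of 250 usec with
 * no IO pending maps to bucket 2, while the same duration with IO
 * outstanding on this CPU maps to bucket 8 (2 + BUCKETS/2), so the two
 * situations keep separate correction-factor history.
 */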

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */

        /*
         * this doesn't work as intended - it is almost always 0, but can
         * sometimes, depending on workload, spike very high into the hundreds
         * even when the average cpu load is under 10%.
         */
        /* mult += 2 * get_loadavg(); */

        /* for IO wait tasks (per cpu!) we add 10 to the multiplier for each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());

        return mult;
}
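
/*
 * Illustrative use (hypothetical values): with two tasks in iowait on
 * this CPU, performance_multiplier() returns 1 + 10 * 2 = 21, so a
 * C state with a 100 usec exit latency needs a predicted idle time of
 * at least 2100 usec before menu_select() will consider it.
 */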

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
        return div_u64(dividend + (divisor / 2), divisor);
}
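
/*
 * For example (illustrative values): div_round64(7, 2) returns 4 and
 * div_round64(6, 4) returns 2; the quotient is rounded to the nearest
 * integer instead of being truncated.
 */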

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
        int i = 0, divisor = 0;
        uint64_t max = 0, avg = 0, stddev = 0;
        int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */

again:

        /* first calculate average and standard deviation of the past */
        max = avg = divisor = stddev = 0;
        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        avg += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        do_div(avg, divisor);

        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = value - avg;
                        stddev += diff * diff;
                }
        }
        do_div(stddev, divisor);
        stddev = int_sqrt(stddev);
        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         *
         * The typical interval is obtained when standard deviation is small
         * or standard deviation is small compared to the average interval.
         */
        if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
                data->predicted_us = avg;
                return;

        } else if ((divisor * 4) > INTERVALS * 3) {
                /* Exclude the max interval */
                thresh = max - 1;
                goto again;
        }
}
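
/*
 * Illustrative walk-through (example values): with recorded intervals
 * {100, 100, 100, 100, 100, 100, 100, 50000} the first pass fails the
 * variance test, the 50000 usec outlier is excluded by lowering the
 * threshold, and the second pass (7 of 8 samples, avg 100, stddev 0)
 * sets predicted_us to 100.
 */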

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        int multiplier;
        struct timespec t;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
        data->exit_us = 0;

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
        data->expected_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

        data->bucket = which_bucket(data->expected_us);

        multiplier = performance_multiplier();

        /*
         * if the correction factor is 0 (e.g. first-time init or cpu
         * hotplug), we actually want to start out with a unity factor.
         */
        if (data->correction_factor[data->bucket] == 0)
                data->correction_factor[data->bucket] = RESOLUTION * DECAY;

        /* Make sure to round up for half microseconds */
        data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);

        get_typical_interval(data);

        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
        if (data->expected_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
            dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;
                if (s->exit_latency * multiplier > data->predicted_us)
                        continue;

                data->last_state_idx = i;
                data->exit_us = s->exit_latency;
        }

        return data->last_state_idx;
}
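
/*
 * Example decision (illustrative values): with predicted_us = 800,
 * latency_req = 100 and multiplier = 11, a state with
 * target_residency = 500 and exit_latency = 50 is chosen
 * (50 * 11 = 550 does not exceed 800), while a deeper state with
 * exit_latency = 80 is skipped because 80 * 11 = 880 does.
 */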

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        data->last_state_idx = index;
        if (index >= 0)
                data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
        unsigned int last_idle_us = cpuidle_get_last_residency(dev);
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        u64 new_factor;

        /*
         * Ugh, this idle state doesn't support residency measurements, so we
         * are basically lost in the dark. As a compromise, assume we slept
         * for the whole expected time.
         */
        if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
                last_idle_us = data->expected_us;

        measured_us = last_idle_us;

        /*
         * We correct for the exit latency; we are assuming here that the
         * exit latency happens after the event that we're interested in.
         */
        if (measured_us > data->exit_us)
                measured_us -= data->exit_us;

        /* update our correction ratio */
        new_factor = data->correction_factor[data->bucket]
                        * (DECAY - 1) / DECAY;

        if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->expected_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time.
         */
        if (new_factor == 0)
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = last_idle_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
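
/*
 * Worked example of the decaying average above (illustrative values):
 * with DECAY = 8, an old factor of 8192 (a perfect 1.0 ratio), an
 * expected sleep of 100 usec and a measured one of 50 usec, the new
 * factor is 8192 * 7 / 8 + 1024 * 50 / 100 = 7168 + 512 = 7680, moving
 * the effective correction from 1.0 to 7680/8192 = 0.9375 on its way
 * toward 0.5.
 */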

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

        memset(data, 0, sizeof(struct menu_device));

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
        cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);