drivers/cpuidle/cpuidle.c
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

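/**
 * cpuidle_disabled - report whether cpuidle has been switched off
 *
 * Returns non-zero when 'off' was set on the command line or via
 * disable_cpuidle().
 */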
int cpuidle_disabled(void)
{
	return off;
}
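/**
 * disable_cpuidle - force cpuidle off
 *
 * Sets the 'off' flag so that cpuidle_idle_call() bails out with -ENODEV
 * and, if set before the initcall runs, cpuidle_init() does nothing.
 */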
void disable_cpuidle(void)
{
	off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

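/**
 * cpuidle_enter - call the target state's enter callback directly
 * @dev: cpuidle device for this CPU
 * @drv: cpuidle driver providing the states
 * @index: index into drv->states[]
 *
 * No timekeeping is done here; that is left to the driver or to
 * cpuidle_enter_tk() below.
 */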
static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

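/**
 * cpuidle_enter_tk - enter an idle state with core timekeeping
 * @dev: cpuidle device for this CPU
 * @drv: cpuidle driver providing the states
 * @index: index into drv->states[]
 *
 * Wraps cpuidle_enter() with cpuidle_wrap_enter(), which measures the
 * residency and re-enables interrupts on exit.
 */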
static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

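/*
 * cpuidle_enter_ops is set in cpuidle_enable_device(): it points at
 * cpuidle_enter_tk() when the driver asks the core to handle timekeeping
 * and irq enabling (drv->en_core_tk_irqen), otherwise at the plain
 * cpuidle_enter().
 */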
typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns an error if there is no driver or no state provides an
 * enter_dead() handler; otherwise the chosen state's enter_dead()
 * handler is invoked.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i, dead_state = -1;
	int power_usage = INT_MAX;	/* above any real value, so the first candidate wins */

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->power_usage < power_usage && s->enter_dead) {
			power_usage = s->power_usage;
			dead_state = i;
		}
	}

	if (dead_state != -1)
		return drv->states[dead_state].enter_dead(dev, dead_state);

	return -ENODEV;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		/* a task became runnable while selecting; skip idling */
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
			(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 * @enter: the state enter callback to wrap
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index,
				int (*enter)(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
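/*
 * poll_idle - state 0 fallback: busy-wait with cpu_relax() until a
 * reschedule is needed, recording the time spent as the residency.
 */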
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

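/*
 * poll_idle_init - turn state 0 into a pure polling state on
 * architectures that provide cpu_relax(); elsewhere this is a no-op
 * (see the #else stub below).
 */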
static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv = cpuidle_get_driver();

	if (dev->enabled)
		return 0;
	if (!drv || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;

	/* look up the CPU's device only after the NULL check above */
	cpu_dev = get_cpu_device((unsigned long)dev->cpu);

	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

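/*
 * 'off' is exposed read-only (mode 0444); since this file is built into
 * the kernel, it is set at boot with cpuidle.off=1.
 */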
module_param(off, int, 0444);
core_initcall(cpuidle_init);