/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"
#include <plat/serial.h>

#include "pm.h"
#include "control.h"

#ifdef CONFIG_CPU_IDLE

#define OMAP3_MAX_STATES 7
#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core inactive */
#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */

#define OMAP3_STATE_MAX OMAP3_STATE_C7

#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */

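/*
 * Per-C-state bookkeeping for this driver: 'valid' marks whether the state
 * may currently be entered, 'type' is one of the OMAP3_STATE_Cx ids,
 * 'sleep_latency'/'wakeup_latency' and 'threshold' are in microseconds,
 * 'mpu_state'/'core_state' are the target PWRDM_POWER_* states, 'flags'
 * carries CPUIDLE_FLAG_* bits and 'desc' is the human-readable name.
 */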
struct omap3_processor_cx {
        u8 valid;
        u8 type;
        u32 sleep_latency;
        u32 wakeup_latency;
        u32 mpu_state;
        u32 core_state;
        u32 threshold;
        u32 flags;
        const char *desc;
};

struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
struct omap3_processor_cx current_cx_state;
struct powerdomain *mpu_pd, *core_pd, *per_pd;
struct powerdomain *cam_pd;

/*
 * The latencies/thresholds for various C states have
 * to be configured from the respective board files.
 * These are some default values (which might not provide
 * the best power savings) used on boards which do not
 * pass these details from the board file.
 */
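/*
 * Each entry below is { valid, sleep latency, wakeup latency, threshold }
 * (assuming that field order in struct cpuidle_params); latencies and
 * thresholds are in microseconds.
 */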
static struct cpuidle_params cpuidle_params_table[] = {
        /* C1 */
        {1, 2, 2, 5},
        /* C2 */
        {1, 10, 10, 30},
        /* C3 */
        {1, 50, 50, 300},
        /* C4 */
        {1, 1500, 1800, 4000},
        /* C5 */
        {1, 2500, 7500, 12000},
        /* C6 */
        {1, 3000, 8500, 15000},
        /* C7 */
        {1, 10000, 30000, 300000},
};

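/*
 * Returns 1 when the system is not ready to idle (as reported by
 * omap3_can_sleep()), 0 otherwise.  Used as the bus-activity check for
 * states flagged with CPUIDLE_FLAG_CHECK_BM.
 */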
static int omap3_idle_bm_check(void)
{
        if (!omap3_can_sleep())
                return 1;
        return 0;
}

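/*
 * Clockdomain callbacks passed to pwrdm_for_each_clkdm(): for the C1 state
 * the MPU and CORE clockdomains are taken out of (and later put back into)
 * hardware-supervised idle around the WFI in omap3_enter_idle().
 */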
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
                                struct clockdomain *clkdm)
{
        clkdm_allow_idle(clkdm);
        return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
                                struct clockdomain *clkdm)
{
        clkdm_deny_idle(clkdm);
        return 0;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Returns the time spent in the low power state, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_state *state)
{
        struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
        struct timespec ts_preidle, ts_postidle, ts_idle;
        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

        current_cx_state = *cx;

        /* Used to keep track of the total time in idle */
        getnstimeofday(&ts_preidle);

        local_irq_disable();
        local_fiq_disable();

        pwrdm_set_next_pwrst(mpu_pd, mpu_state);
        pwrdm_set_next_pwrst(core_pd, core_state);

        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;

        if (cx->type == OMAP3_STATE_C1) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }

        /* Execute ARM wfi */
        omap_sram_idle();

        if (cx->type == OMAP3_STATE_C1) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
        }

return_sleep_time:
        getnstimeofday(&ts_postidle);
        ts_idle = timespec_sub(ts_postidle, ts_preidle);

        local_irq_enable();
        local_fiq_enable();

        return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}

/**
 * next_valid_state - Find next valid c-state
 * @dev: cpuidle device
 * @curr: Currently selected c-state
 *
 * If the current state is valid, it is returned to the caller.
 * Else, this function searches for a lower c-state which is still
 * valid (as defined in omap3_power_states[]).
 */
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
                                                struct cpuidle_state *curr)
{
        struct cpuidle_state *next = NULL;
        struct omap3_processor_cx *cx;

        cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);

        /* Check if current state is valid */
        if (cx->valid) {
                return curr;
        } else {
                u8 idx = OMAP3_STATE_MAX;

                /*
                 * Reach the current state starting at highest C-state
                 */
                for (; idx >= OMAP3_STATE_C1; idx--) {
                        if (&dev->states[idx] == curr) {
                                next = &dev->states[idx];
                                break;
                        }
                }

                /*
                 * Should never hit this condition.
                 */
                WARN_ON(next == NULL);

                /*
                 * Drop to next valid state.
                 * Start search from the next (lower) state.
                 */
                idx--;
                for (; idx >= OMAP3_STATE_C1; idx--) {
                        struct omap3_processor_cx *cx;

                        cx = cpuidle_get_statedata(&dev->states[idx]);
                        if (cx->valid) {
                                next = &dev->states[idx];
                                break;
                        }
                }
                /*
                 * C1 and C2 are always valid.
                 * So, no need to check for 'next==NULL' outside this loop.
                 */
        }

        return next;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                               struct cpuidle_state *state)
{
        struct cpuidle_state *new_state = next_valid_state(dev, state);
        u32 core_next_state, per_next_state = 0, per_saved_state = 0;
        u32 cam_state;
        struct omap3_processor_cx *cx;
        int ret;

        if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
                BUG_ON(!dev->safe_state);
                new_state = dev->safe_state;
                goto select_state;
        }

        cx = cpuidle_get_statedata(state);
        core_next_state = cx->core_state;

        /*
         * FIXME: we currently manage device-specific idle states
         * for PER and CORE in combination with CPU-specific
         * idle states.  This is wrong, and device-specific
         * idle management needs to be separated out into
         * its own code.
         */

        /*
         * Prevent idle completely if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
         */
        cam_state = pwrdm_read_pwrst(cam_pd);
        if (cam_state == PWRDM_POWER_ON) {
                new_state = dev->safe_state;
                goto select_state;
        }

        /*
         * Prevent PER off if CORE is not in retention or off as this
         * would disable PER wakeups completely.
         */
        per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
        if ((per_next_state == PWRDM_POWER_OFF) &&
            (core_next_state > PWRDM_POWER_RET))
                per_next_state = PWRDM_POWER_RET;

        /* Are we changing PER target state? */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);

select_state:
        dev->last_state = new_state;
        ret = omap3_enter_idle(dev, new_state);

        /* Restore original PER state if it was modified */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_saved_state);

        return ret;
}

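/*
 * One cpuidle device per CPU.  OMAP3 has a single Cortex-A8 core, so only
 * the boot CPU's instance is set up and registered in omap3_idle_init().
 */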
DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

/**
 * omap3_cpuidle_update_states() - Update the cpuidle states
 * @mpu_deepest_state: Enable states up to and including this for the MPU domain
 * @core_deepest_state: Enable states up to and including this for the CORE domain
 *
 * Walks the list of available C states and marks each one valid or
 * invalid, depending on whether its MPU and CORE states are within the
 * deepest states allowed for the respective power domains.
 */
void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
{
        int i;

        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                struct omap3_processor_cx *cx = &omap3_power_states[i];

                if ((cx->mpu_state >= mpu_deepest_state) &&
                    (cx->core_state >= core_deepest_state)) {
                        cx->valid = 1;
                } else {
                        cx->valid = 0;
                }
        }
}

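/**
 * omap3_pm_init_cpuidle() - Override the default C-state parameters
 * @cpuidle_board_params: board-specific latency/threshold table
 *
 * Lets board files replace the default latencies and thresholds in
 * cpuidle_params_table with tuned, board-specific values.  A NULL
 * pointer leaves the defaults untouched.
 */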
void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
{
        int i;

        if (!cpuidle_board_params)
                return;

        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                cpuidle_params_table[i].valid =
                        cpuidle_board_params[i].valid;
                cpuidle_params_table[i].sleep_latency =
                        cpuidle_board_params[i].sleep_latency;
                cpuidle_params_table[i].wake_latency =
                        cpuidle_board_params[i].wake_latency;
                cpuidle_params_table[i].threshold =
                        cpuidle_board_params[i].threshold;
        }
        return;
}

/* omap_init_power_states - Initialises the OMAP3 specific C states.
 *
 * Below is the description of each C state.
 *	C1 . MPU WFI + Core active
 *	C2 . MPU WFI + Core inactive
 *	C3 . MPU CSWR + Core inactive
 *	C4 . MPU OFF + Core inactive
 *	C5 . MPU CSWR + Core CSWR
 *	C6 . MPU OFF + Core CSWR
 *	C7 . MPU OFF + Core OFF
 */
void omap_init_power_states(void)
{
        /* C1 . MPU WFI + Core active */
        omap3_power_states[OMAP3_STATE_C1].valid =
                cpuidle_params_table[OMAP3_STATE_C1].valid;
        omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
        omap3_power_states[OMAP3_STATE_C1].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
        omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
        omap3_power_states[OMAP3_STATE_C1].threshold =
                cpuidle_params_table[OMAP3_STATE_C1].threshold;
        omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
        omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";

        /* C2 . MPU WFI + Core inactive */
        omap3_power_states[OMAP3_STATE_C2].valid =
                cpuidle_params_table[OMAP3_STATE_C2].valid;
        omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
        omap3_power_states[OMAP3_STATE_C2].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
        omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
        omap3_power_states[OMAP3_STATE_C2].threshold =
                cpuidle_params_table[OMAP3_STATE_C2].threshold;
        omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";

        /* C3 . MPU CSWR + Core inactive */
        omap3_power_states[OMAP3_STATE_C3].valid =
                cpuidle_params_table[OMAP3_STATE_C3].valid;
        omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
        omap3_power_states[OMAP3_STATE_C3].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
        omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
        omap3_power_states[OMAP3_STATE_C3].threshold =
                cpuidle_params_table[OMAP3_STATE_C3].threshold;
        omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";

        /* C4 . MPU OFF + Core inactive */
        omap3_power_states[OMAP3_STATE_C4].valid =
                cpuidle_params_table[OMAP3_STATE_C4].valid;
        omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
        omap3_power_states[OMAP3_STATE_C4].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
        omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
        omap3_power_states[OMAP3_STATE_C4].threshold =
                cpuidle_params_table[OMAP3_STATE_C4].threshold;
        omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";

        /* C5 . MPU CSWR + Core CSWR */
        omap3_power_states[OMAP3_STATE_C5].valid =
                cpuidle_params_table[OMAP3_STATE_C5].valid;
        omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
        omap3_power_states[OMAP3_STATE_C5].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
        omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
        omap3_power_states[OMAP3_STATE_C5].threshold =
                cpuidle_params_table[OMAP3_STATE_C5].threshold;
        omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";

        /* C6 . MPU OFF + Core CSWR */
        omap3_power_states[OMAP3_STATE_C6].valid =
                cpuidle_params_table[OMAP3_STATE_C6].valid;
        omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
        omap3_power_states[OMAP3_STATE_C6].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
        omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
        omap3_power_states[OMAP3_STATE_C6].threshold =
                cpuidle_params_table[OMAP3_STATE_C6].threshold;
        omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";

        /* C7 . MPU OFF + Core OFF */
        omap3_power_states[OMAP3_STATE_C7].valid =
                cpuidle_params_table[OMAP3_STATE_C7].valid;
        omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
        omap3_power_states[OMAP3_STATE_C7].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
        omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
        omap3_power_states[OMAP3_STATE_C7].threshold =
                cpuidle_params_table[OMAP3_STATE_C7].threshold;
        omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;
        omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";

        /*
         * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
         * enable OFF mode in a stable form for previous revisions, so we
         * disable the C7 state as a result.
         */
        if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
                omap3_power_states[OMAP3_STATE_C7].valid = 0;
                cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
                pr_warn("%s: core off state C7 disabled due to i583\n",
                        __func__);
        }
}

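/* cpuidle driver descriptor registered with the framework in omap3_idle_init() */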
struct cpuidle_driver omap3_idle_driver = {
        .name =		"omap3_idle",
        .owner =	THIS_MODULE,
};

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
        int i, count = 0;
        struct omap3_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_device *dev;

        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        core_pd = pwrdm_lookup("core_pwrdm");
        per_pd = pwrdm_lookup("per_pwrdm");
        cam_pd = pwrdm_lookup("cam_pwrdm");

        omap_init_power_states();
        cpuidle_register_driver(&omap3_idle_driver);

        dev = &per_cpu(omap3_idle_dev, smp_processor_id());

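        /*
         * Register only the states marked valid.  Because invalid states
         * are skipped, the registered states are named C1..Cn in order of
         * registration, which may not match the OMAP3_STATE_Cx numbering.
         */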
        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                cx = &omap3_power_states[i];
                state = &dev->states[count];

                if (!cx->valid)
                        continue;
                cpuidle_set_statedata(state, cx);
                state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
                state->target_residency = cx->threshold;
                state->flags = cx->flags;
                state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
                        omap3_enter_idle_bm : omap3_enter_idle;
                if (cx->type == OMAP3_STATE_C1)
                        dev->safe_state = state;
                sprintf(state->name, "C%d", count+1);
                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                count++;
        }

        if (!count)
                return -EINVAL;
        dev->state_count = count;

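        /*
         * Restrict the valid states to retention (or to off, when off-mode
         * is enabled) for both the MPU and CORE power domains.
         */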
        if (enable_off_mode)
                omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
        else
                omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

        if (cpuidle_register_device(dev)) {
                printk(KERN_ERR "%s: CPUidle register device failed\n",
                       __func__);
                return -EIO;
        }

        return 0;
}
#else
int __init omap3_idle_init(void)
{
        return 0;
}
#endif /* CONFIG_CPU_IDLE */