OMAP3630: PM: don't warn the user with a trace in case of PM34XX_ERRATUM
android_kernel_alcatel_ttab.git: arch/arm/mach-omap2/cpuidle34xx.c
/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"
#include <plat/serial.h>

#include "pm.h"
#include "control.h"

#ifdef CONFIG_CPU_IDLE

#define OMAP3_MAX_STATES 7
#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core inactive */
#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */

#define OMAP3_STATE_MAX OMAP3_STATE_C7

#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */

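/*
 * struct omap3_processor_cx - OMAP3 C-state description
 * @valid: whether this C-state is currently usable
 * @type: the OMAP3_STATE_Cx index of this state
 * @sleep_latency: time (in us) to enter this state
 * @wakeup_latency: time (in us) to exit this state
 * @mpu_state: target MPU powerdomain state (PWRDM_POWER_*)
 * @core_state: target CORE powerdomain state (PWRDM_POWER_*)
 * @threshold: target residency (in us) for this state to be worthwhile
 * @flags: CPUIDLE_FLAG_* flags handed to the cpuidle core
 */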
struct omap3_processor_cx {
        u8 valid;
        u8 type;
        u32 sleep_latency;
        u32 wakeup_latency;
        u32 mpu_state;
        u32 core_state;
        u32 threshold;
        u32 flags;
};

struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
struct omap3_processor_cx current_cx_state;
struct powerdomain *mpu_pd, *core_pd, *per_pd;
struct powerdomain *cam_pd;

/*
 * The latencies/thresholds for the various C-states have to be
 * configured from the respective board files. These are default
 * values (which may not yield the best power savings) used on
 * boards that do not pass these details from the board file.
 */
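/*
 * Each entry below is assumed to follow the field order of
 * struct cpuidle_params in pm.h:
 * { valid, sleep_latency (us), wake_latency (us), threshold (us) }
 */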
static struct cpuidle_params cpuidle_params_table[] = {
        /* C1 */
        {1, 2, 2, 5},
        /* C2 */
        {1, 10, 10, 30},
        /* C3 */
        {1, 50, 50, 300},
        /* C4 */
        {1, 1500, 1800, 4000},
        /* C5 */
        {1, 2500, 7500, 12000},
        /* C6 */
        {1, 3000, 8500, 15000},
        /* C7 */
        {1, 10000, 30000, 300000},
};

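/*
 * Returns 1 when the PM code reports that deeper idle must be avoided
 * (omap3_can_sleep() is false), 0 otherwise.
 */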
static int omap3_idle_bm_check(void)
{
        if (!omap3_can_sleep())
                return 1;
        return 0;
}

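/*
 * Callbacks for pwrdm_for_each_clkdm(): they enable or disable
 * hardware-supervised idle for each clockdomain in a powerdomain.
 * omap3_enter_idle() uses them to keep the MPU and CORE clockdomains
 * from idling around the WFI in the C1 state.
 */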
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
                                struct clockdomain *clkdm)
{
        omap2_clkdm_allow_idle(clkdm);
        return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
                                struct clockdomain *clkdm)
{
        omap2_clkdm_deny_idle(clkdm);
        return 0;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_state *state)
{
        struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
        struct timespec ts_preidle, ts_postidle, ts_idle;
        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

        current_cx_state = *cx;

        /* Used to keep track of the total time in idle */
        getnstimeofday(&ts_preidle);

        local_irq_disable();
        local_fiq_disable();

        pwrdm_set_next_pwrst(mpu_pd, mpu_state);
        pwrdm_set_next_pwrst(core_pd, core_state);

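        /* Bail out early if an interrupt is already pending or a reschedule is due */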
        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;

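        /*
         * For C1, keep the MPU and CORE clockdomains active across the
         * WFI so that only the ARM core idles; they are released again
         * once omap_sram_idle() returns.
         */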
        if (cx->type == OMAP3_STATE_C1) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }

        /* Execute ARM wfi */
        omap_sram_idle();

        if (cx->type == OMAP3_STATE_C1) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
        }

return_sleep_time:
        getnstimeofday(&ts_postidle);
        ts_idle = timespec_sub(ts_postidle, ts_preidle);

        local_irq_enable();
        local_fiq_enable();

        return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}


/**
 * next_valid_state - Find next valid C-state
 * @dev: cpuidle device
 * @curr: Currently selected C-state
 *
 * If the current state is valid, it is returned to the caller.
 * Otherwise, this function searches for a lower C-state which is
 * still valid (as defined in omap3_power_states[]).
 */
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
                                                struct cpuidle_state *curr)
{
        struct cpuidle_state *next = NULL;
        struct omap3_processor_cx *cx;

        cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);

        /* Check if current state is valid */
        if (cx->valid) {
                return curr;
        } else {
                u8 idx = OMAP3_STATE_MAX;

                /*
                 * Reach the current state starting at highest C-state
                 */
                for (; idx >= OMAP3_STATE_C1; idx--) {
                        if (&dev->states[idx] == curr) {
                                next = &dev->states[idx];
                                break;
                        }
                }

                /*
                 * Should never hit this condition.
                 */
                WARN_ON(next == NULL);

                /*
                 * Drop to next valid state.
                 * Start search from the next (lower) state.
                 */
                idx--;
                for (; idx >= OMAP3_STATE_C1; idx--) {
                        struct omap3_processor_cx *cx;

                        cx = cpuidle_get_statedata(&dev->states[idx]);
                        if (cx->valid) {
                                next = &dev->states[idx];
                                break;
                        }
                }
                /*
                 * C1 and C2 are always valid.
                 * So, no need to check for 'next==NULL' outside this loop.
                 */
        }

        return next;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                               struct cpuidle_state *state)
{
        struct cpuidle_state *new_state = next_valid_state(dev, state);
        u32 core_next_state, per_next_state = 0, per_saved_state = 0;
        u32 cam_state;
        struct omap3_processor_cx *cx;
        int ret;

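        /*
         * If this state requires the bus-activity check and the system
         * cannot sleep right now, fall back to the safe (C1) state.
         */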
        if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
                BUG_ON(!dev->safe_state);
                new_state = dev->safe_state;
                goto select_state;
        }

        cx = cpuidle_get_statedata(state);
        core_next_state = cx->core_state;

        /*
         * FIXME: we currently manage device-specific idle states
         *        for PER and CORE in combination with CPU-specific
         *        idle states. This is wrong, and device-specific
         *        idle management needs to be separated out into
         *        its own code.
         */

        /*
         * Prevent idle completely if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
         */
        cam_state = pwrdm_read_pwrst(cam_pd);
        if (cam_state == PWRDM_POWER_ON) {
                new_state = dev->safe_state;
                goto select_state;
        }

        /*
         * Prevent PER off if CORE is not in retention or off as this
         * would disable PER wakeups completely.
         */
        per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
        if ((per_next_state == PWRDM_POWER_OFF) &&
            (core_next_state > PWRDM_POWER_RET))
                per_next_state = PWRDM_POWER_RET;

        /* Are we changing PER target state? */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);

select_state:
        dev->last_state = new_state;
        ret = omap3_enter_idle(dev, new_state);

        /* Restore original PER state if it was modified */
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_saved_state);

        return ret;
}

DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

/**
 * omap3_cpuidle_update_states() - Update the cpuidle states
 * @mpu_deepest_state: Enable states up to and including this for the MPU domain
 * @core_deepest_state: Enable states up to and including this for the CORE domain
 *
 * This goes through the list of available states and marks each C-state
 * valid or invalid depending on whether its MPU and CORE power states
 * fall within the given deepest-state limits.
 */
void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
{
        int i;

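        /*
         * The PWRDM_POWER_* constants number deeper states lower
         * (e.g. OFF < RET < ON), so the ">=" comparisons below keep
         * only states no deeper than the requested limits.
         */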
        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                struct omap3_processor_cx *cx = &omap3_power_states[i];

                if ((cx->mpu_state >= mpu_deepest_state) &&
                    (cx->core_state >= core_deepest_state)) {
                        cx->valid = 1;
                } else {
                        cx->valid = 0;
                }
        }
}

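/*
 * Called from board files to override the default C-state latencies and
 * thresholds above; to take effect this must happen before
 * omap3_idle_init() copies the table into omap3_power_states[].
 */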
void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
{
        int i;

        if (!cpuidle_board_params)
                return;

        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                cpuidle_params_table[i].valid =
                        cpuidle_board_params[i].valid;
                cpuidle_params_table[i].sleep_latency =
                        cpuidle_board_params[i].sleep_latency;
                cpuidle_params_table[i].wake_latency =
                        cpuidle_board_params[i].wake_latency;
                cpuidle_params_table[i].threshold =
                        cpuidle_board_params[i].threshold;
        }
        return;
}

/* omap_init_power_states - Initialises the OMAP3 specific C states.
 *
 * Below is the description of each C state.
 * C1 . MPU WFI + Core active
 * C2 . MPU WFI + Core inactive
 * C3 . MPU CSWR + Core inactive
 * C4 . MPU OFF + Core inactive
 * C5 . MPU CSWR + Core CSWR
 * C6 . MPU OFF + Core CSWR
 * C7 . MPU OFF + Core OFF
 */
void omap_init_power_states(void)
{
        /* C1 . MPU WFI + Core active */
        omap3_power_states[OMAP3_STATE_C1].valid =
                cpuidle_params_table[OMAP3_STATE_C1].valid;
        omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
        omap3_power_states[OMAP3_STATE_C1].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
        omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
        omap3_power_states[OMAP3_STATE_C1].threshold =
                cpuidle_params_table[OMAP3_STATE_C1].threshold;
        omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;

        /* C2 . MPU WFI + Core inactive */
        omap3_power_states[OMAP3_STATE_C2].valid =
                cpuidle_params_table[OMAP3_STATE_C2].valid;
        omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
        omap3_power_states[OMAP3_STATE_C2].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
        omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
        omap3_power_states[OMAP3_STATE_C2].threshold =
                cpuidle_params_table[OMAP3_STATE_C2].threshold;
        omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /* C3 . MPU CSWR + Core inactive */
        omap3_power_states[OMAP3_STATE_C3].valid =
                cpuidle_params_table[OMAP3_STATE_C3].valid;
        omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
        omap3_power_states[OMAP3_STATE_C3].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
        omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
        omap3_power_states[OMAP3_STATE_C3].threshold =
                cpuidle_params_table[OMAP3_STATE_C3].threshold;
        omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /* C4 . MPU OFF + Core inactive */
        omap3_power_states[OMAP3_STATE_C4].valid =
                cpuidle_params_table[OMAP3_STATE_C4].valid;
        omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
        omap3_power_states[OMAP3_STATE_C4].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
        omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
        omap3_power_states[OMAP3_STATE_C4].threshold =
                cpuidle_params_table[OMAP3_STATE_C4].threshold;
        omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
        omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /* C5 . MPU CSWR + Core CSWR */
        omap3_power_states[OMAP3_STATE_C5].valid =
                cpuidle_params_table[OMAP3_STATE_C5].valid;
        omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
        omap3_power_states[OMAP3_STATE_C5].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
        omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
        omap3_power_states[OMAP3_STATE_C5].threshold =
                cpuidle_params_table[OMAP3_STATE_C5].threshold;
        omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /* C6 . MPU OFF + Core CSWR */
        omap3_power_states[OMAP3_STATE_C6].valid =
                cpuidle_params_table[OMAP3_STATE_C6].valid;
        omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
        omap3_power_states[OMAP3_STATE_C6].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
        omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
        omap3_power_states[OMAP3_STATE_C6].threshold =
                cpuidle_params_table[OMAP3_STATE_C6].threshold;
        omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
        omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /* C7 . MPU OFF + Core OFF */
        omap3_power_states[OMAP3_STATE_C7].valid =
                cpuidle_params_table[OMAP3_STATE_C7].valid;
        omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
        omap3_power_states[OMAP3_STATE_C7].sleep_latency =
                cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
        omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
                cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
        omap3_power_states[OMAP3_STATE_C7].threshold =
                cpuidle_params_table[OMAP3_STATE_C7].threshold;
        omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
                CPUIDLE_FLAG_CHECK_BM;

        /*
         * Erratum i583: implementation for ES rev < ES1.2 on 3630. We
         * cannot enable OFF mode in a stable form for previous
         * revisions, so we disable the C7 state as a result.
         */
        if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
                omap3_power_states[OMAP3_STATE_C7].valid = 0;
                cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
                pr_warn("%s: core off state C7 disabled due to i583\n",
                        __func__);
        }
}

struct cpuidle_driver omap3_idle_driver = {
        .name =         "omap3_idle",
        .owner =        THIS_MODULE,
};

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
        int i, count = 0;
        struct omap3_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_device *dev;

        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        core_pd = pwrdm_lookup("core_pwrdm");
        per_pd = pwrdm_lookup("per_pwrdm");
        cam_pd = pwrdm_lookup("cam_pwrdm");

        omap_init_power_states();
        cpuidle_register_driver(&omap3_idle_driver);

        dev = &per_cpu(omap3_idle_dev, smp_processor_id());

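        /*
         * Register only the valid C-states; invalid ones are skipped so
         * dev->states[] stays densely packed. C1 doubles as the safe
         * state used when deeper idle must be avoided.
         */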
        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                cx = &omap3_power_states[i];
                state = &dev->states[count];

                if (!cx->valid)
                        continue;
                cpuidle_set_statedata(state, cx);
                state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
                state->target_residency = cx->threshold;
                state->flags = cx->flags;
                state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
                        omap3_enter_idle_bm : omap3_enter_idle;
                if (cx->type == OMAP3_STATE_C1)
                        dev->safe_state = state;
                sprintf(state->name, "C%d", count+1);
                count++;
        }

        if (!count)
                return -EINVAL;
        dev->state_count = count;

        if (enable_off_mode)
                omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
        else
                omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

        if (cpuidle_register_device(dev)) {
                printk(KERN_ERR "%s: CPUidle register device failed\n",
                       __func__);
                return -EIO;
        }

        return 0;
}
#else
int __init omap3_idle_init(void)
{
        return 0;
}
#endif /* CONFIG_CPU_IDLE */