/* arch/arm/mach-omap2/pm34xx.c */
1 /*
2 * OMAP3 Power Management Routines
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 * Jouni Hogander
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Rajendra Nayak <rnayak@ti.com>
10 *
11 * Copyright (C) 2005 Texas Instruments, Inc.
12 * Richard Woodruff <r-woodruff2@ti.com>
13 *
14 * Based on pm.c for omap1
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21 #include <linux/pm.h>
22 #include <linux/suspend.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/list.h>
26 #include <linux/err.h>
27 #include <linux/gpio.h>
28 #include <linux/clk.h>
29 #include <linux/delay.h>
30 #include <linux/slab.h>
31
32 #include <plat/sram.h>
33 #include <plat/clockdomain.h>
34 #include <plat/powerdomain.h>
35 #include <plat/serial.h>
36 #include <plat/sdrc.h>
37 #include <plat/prcm.h>
38 #include <plat/gpmc.h>
39 #include <plat/dma.h>
40
41 #include <asm/tlbflush.h>
42
43 #include "cm.h"
44 #include "cm-regbits-34xx.h"
45 #include "prm-regbits-34xx.h"
46
47 #include "prm.h"
48 #include "pm.h"
49 #include "sdrc.h"
50 #include "control.h"
51
52 /* Scratchpad offsets */
53 #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4
54 #define OMAP343X_TABLE_VALUE_OFFSET 0xc0
55 #define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8
56
/*
 * Per-powerdomain bookkeeping: the low-power state we want the domain
 * to enter next, and (during suspend) the previously programmed state
 * so it can be restored after resume.
 */
struct power_state {
	struct powerdomain *pwrdm;	/* powerdomain this entry tracks */
	u32 next_state;			/* target low-power state */
#ifdef CONFIG_SUSPEND
	u32 saved_state;		/* next_state saved across suspend */
#endif
	struct list_head node;		/* link in pwrst_list */
};
65
/* All registered powerdomains' power_state entries (built in pwrdms_setup) */
static LIST_HEAD(pwrst_list);

/* SRAM-resident low-level suspend routine; set by omap_push_sram_idle() */
static void (*_omap_sram_idle)(u32 *addr, int save_state);

/* SRAM-resident secure-RAM save routine (HS/EMU devices only) */
static int (*_omap_save_secure_sram)(u32 *addr);

/* Powerdomain handles cached once at init to avoid repeated lookups */
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;
75
/* Save PER-domain peripheral context (currently only the GPIO banks) */
static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}
80
/* Restore PER-domain peripheral context saved by omap3_per_save_context() */
static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}
85
/*
 * Enable the I/O wakeup daisy chain and wait for it to become active.
 * Only ES3.1+ silicon has the daisy chain; earlier revisions are a
 * no-op.  Gives up with an error message after 1000 polls.
 *
 * NOTE(review): the retry path re-writes OMAP3430_ST_IO_CHAIN_MASK into
 * PM_WKEN; later kernels clear the latched status via PM_WKST instead.
 * Verify this sequence against the 34xx TRM before relying on it.
 */
static void omap3_enable_io_chain(void)
{
	int timeout = 0;

	if (omap_rev() >= OMAP3430_REV_ES3_1) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				     PM_WKEN);
		/* Do a readback to assure write has been done */
		prm_read_mod_reg(WKUP_MOD, PM_WKEN);

		/* Busy-wait (no delay) for the chain to report active */
		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
			 OMAP3430_ST_IO_CHAIN_MASK)) {
			timeout++;
			if (timeout > 1000) {
				printk(KERN_ERR "Wake up daisy chain "
				       "activation failed.\n");
				return;
			}
			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
					     WKUP_MOD, PM_WKEN);
		}
	}
}
109
110 static void omap3_disable_io_chain(void)
111 {
112 if (omap_rev() >= OMAP3430_REV_ES3_1)
113 prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
114 PM_WKEN);
115 }
116
/*
 * Save CORE-domain context ahead of an off-mode transition: trigger the
 * hardware padconf save, then save the INTC, GPMC, system-control and
 * DMA state in software.
 */
static void omap3_core_save_context(void)
{
	u32 control_padconf_off;

	/* Save the padconf registers (hardware-assisted save) */
	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	control_padconf_off |= START_PADCONF_SAVE;
	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
			& PADCONF_SAVE_DONE))
		udelay(1);

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to erratas 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}
145
/*
 * Restore CORE-domain context after an off-mode transition; mirror of
 * omap3_core_save_context() (padconf is restored by hardware).
 */
static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}
156
157 /*
158 * FIXME: This function should be called before entering off-mode after
159 * OMAP3 secure services have been accessed. Currently it is only called
160 * once during boot sequence, but this works as we are not using secure
161 * services.
162 */
163 static void omap3_save_secure_ram_context(u32 target_mpu_state)
164 {
165 u32 ret;
166
167 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
168 /*
169 * MPU next state must be set to POWER_ON temporarily,
170 * otherwise the WFI executed inside the ROM code
171 * will hang the system.
172 */
173 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
174 ret = _omap_save_secure_sram((u32 *)
175 __pa(omap3_secure_ram_storage));
176 pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
177 /* Following is for error tracking, it should not happen */
178 if (ret) {
179 printk(KERN_ERR "save_secure_sram() returns %08x\n",
180 ret);
181 while (1)
182 ;
183 }
184 }
185 }
186
187 /*
188 * PRCM Interrupt Handler Helper Function
189 *
190 * The purpose of this function is to clear any wake-up events latched
191 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
192 * may occur whilst attempting to clear a PM_WKST_x register and thus
193 * set another bit in this register. A while loop is used to ensure
194 * that any peripheral wake-up events occurring while attempting to
195 * clear the PM_WKST_x are detected and cleared.
196 */
/*
 * Clear latched wakeup events for one PRCM module.
 *
 * @module: PRCM module id (e.g. WKUP_MOD, CORE_MOD)
 * @regs:   which register bank: 3 selects the *_3 registers (ES2+),
 *          anything else the *_1 registers
 *
 * Returns the number of clear iterations performed (0 means no wakeup
 * events were pending).  Peripheral clocks are temporarily forced on
 * while clearing so the peripherals can deassert their wakeup lines;
 * the original i/f-clock enables are restored afterwards.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	/* Only consider events routed to the MPU (GRPSEL-masked) */
	wkst = prm_read_mod_reg(module, wkst_off);
	wkst &= prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
		/* Snapshot clock enables so they can be restored below */
		iclk = cm_read_mod_reg(module, iclk_off);
		fclk = cm_read_mod_reg(module, fclk_off);
		/* Loop: new events may latch while we clear (see header) */
		while (wkst) {
			clken = wkst;
			cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			cm_set_mod_reg_bits(clken, module, fclk_off);
			prm_write_mod_reg(wkst, module, wkst_off);
			wkst = prm_read_mod_reg(module, wkst_off);
			c++;
		}
		cm_write_mod_reg(iclk, module, iclk_off);
		cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}
232
233 static int _prcm_int_handle_wakeup(void)
234 {
235 int c;
236
237 c = prcm_clear_mod_irqs(WKUP_MOD, 1);
238 c += prcm_clear_mod_irqs(CORE_MOD, 1);
239 c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
240 if (omap_rev() > OMAP3430_REV_ES1_0) {
241 c += prcm_clear_mod_irqs(CORE_MOD, 3);
242 c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
243 }
244
245 return c;
246 }
247
248 /*
249 * PRCM Interrupt Handler
250 *
251 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
252 * interrupts from the PRCM for the MPU. These bits must be cleared in
253 * order to clear the PRCM interrupt. The PRCM interrupt handler is
254 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
255 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
256 * register indicates that a wake-up event is pending for the MPU and
257 * this bit can only be cleared if the all the wake-up events latched
258 * in the various PM_WKST_x registers have been cleared. The interrupt
259 * handler is implemented using a do-while loop so that if a wake-up
260 * event occurred during the processing of the prcm interrupt handler
261 * (setting a bit in the corresponding PM_WKST_x register and thus
262 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
263 * this would be handled.
264 */
/*
 * PRCM interrupt handler (see the block comment above): clear all
 * latched wakeup sources, then clear PRM_IRQSTATUS_MPU, looping until
 * no enabled status bits remain.
 */
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
{
	u32 irqenable_mpu, irqstatus_mpu;
	int c = 0;

	irqenable_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
	/* Only act on enabled interrupt sources */
	irqstatus_mpu &= irqenable_mpu;

	do {
		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
				     OMAP3430_IO_ST_MASK)) {
			c = _prcm_int_handle_wakeup();

			/*
			 * Is the MPU PRCM interrupt handler racing with the
			 * IVA2 PRCM interrupt handler ?
			 */
			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
			     "but no wakeup sources are marked\n");
		} else {
			/* XXX we need to expand our PRCM interrupt handler */
			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
			     "no code to handle it (%08x)\n", irqstatus_mpu);
		}

		/* Write-to-clear; fails while any PM_WKST bit is still set */
		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
		irqstatus_mpu &= irqenable_mpu;

	} while (irqstatus_mpu);

	return IRQ_HANDLED;
}
304
/* Write @val to the ARM CP15 c1 control register (SCTLR) */
static void restore_control_register(u32 val)
{
	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}
309
310 /* Function to restore the table entry that was modified for enabling MMU */
/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
	void __iomem *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

	/* Get address of entry that was modified (stored as a physical addr) */
	address = (u32 *)__raw_readl(scratchpad_address +
				OMAP343X_TABLE_ADDRESS_OFFSET);
	/* Get the previous value which needs to be restored */
	previous_value = __raw_readl(scratchpad_address +
				OMAP343X_TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	/* The stale mapping must be flushed before it can be re-fetched */
	flush_tlb_all();
	control_reg_value = __raw_readl(scratchpad_address
					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
	/* This will enable caches and prediction */
	restore_control_register(control_reg_value);
}
333
/*
 * omap_sram_idle - enter a low-power state via the SRAM-resident
 * suspend code.
 *
 * Reads the programmed next-power-states of the MPU, PER and CORE
 * powerdomains, saves whatever context those states will lose, jumps
 * into _omap_sram_idle (which executes WFI), and restores context on
 * the way back out.  Must be called with interrupts disabled.
 */
void omap_sram_idle(void)
{
	/*
	 * save_state tells _omap_sram_idle what to save/restore:
	 *   0 => nothing to save and restore
	 *   1 => only L1 and logic lost
	 *   2 => only L2 lost
	 *   3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;

	/* SRAM code not pushed yet (see omap_push_sram_idle) */
	if (!_omap_sram_idle)
		return;

	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}
	pwrdm_pre_transition();

	/* NEON control: NEON follows the MPU's target state */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_enable_io_chain();
	}

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(2);
		omap_uart_prepare_idle(3);
		omap2_gpio_prepare_for_idle(per_next_state);
		if (per_next_state == PWRDM_POWER_OFF)
			omap3_per_save_context();
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(0);
		omap_uart_prepare_idle(1);
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_prcm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices ROM code restores a SRDC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of errata 1.142.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where ARM registers
	 * get saved. The restore path then reads from this
	 * location and restores them back.
	 */
	_omap_sram_idle(omap3_arm_context, save_state);
	cpu_init();

	/* Restore normal SDRC POWER settings */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* Restore table entry modified during MMU restoration */
	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
		restore_table_entry();

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_prcm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		omap_uart_resume_idle(0);
		omap_uart_resume_idle(1);
		if (core_next_state == PWRDM_POWER_OFF)
			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
		omap_uart_resume_idle(2);
		omap_uart_resume_idle(3);
	}

	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_disable_io_chain();
	}

	pwrdm_post_transition();

	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}
478
479 int omap3_can_sleep(void)
480 {
481 if (!sleep_while_idle)
482 return 0;
483 if (!omap_uart_can_sleep())
484 return 0;
485 return 1;
486 }
487
/*
 * CPU idle hook (installed as pm_idle): attempt a low-power transition
 * when nothing forbids it.  IRQs and FIQs are masked around the whole
 * sequence; the checks run in the same order as before (sleep policy
 * first, then pending work) thanks to short-circuit evaluation.
 */
static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (omap3_can_sleep() && !omap_irq_pending() && !need_resched())
		omap_sram_idle();

	local_fiq_enable();
	local_irq_enable();
}
505
506 #ifdef CONFIG_SUSPEND
507 static suspend_state_t suspend_state;
508
/* suspend_ops.prepare: keep the CPU out of hlt-based idle during suspend */
static int omap3_pm_prepare(void)
{
	disable_hlt();
	return 0;
}
514
/*
 * Core of system suspend: push every powerdomain to its suspend target
 * state, run omap_sram_idle(), then restore the pre-suspend target
 * states.  Returns 0 on success, -1 if any domain missed its target.
 * Note the deliberate fall-through into the "restore:" label on the
 * success path.
 */
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Optional wakeup alarm for automated suspend testing */
	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
					 wakeup_timer_milliseconds);

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap_uart_prepare_suspend();
	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			printk(KERN_INFO "Powerdomain (%s) didn't enter "
			       "target state %d\n",
			       pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
	else
		printk(KERN_INFO "Successfully put all powerdomains "
		       "to target state\n");

	return ret;
}
560
561 static int omap3_pm_enter(suspend_state_t unused)
562 {
563 int ret = 0;
564
565 switch (suspend_state) {
566 case PM_SUSPEND_STANDBY:
567 case PM_SUSPEND_MEM:
568 ret = omap3_pm_suspend();
569 break;
570 default:
571 ret = -EINVAL;
572 }
573
574 return ret;
575 }
576
/* suspend_ops.finish: re-allow hlt-based idle (pairs with omap3_pm_prepare) */
static void omap3_pm_finish(void)
{
	enable_hlt();
}
581
582 /* Hooks to enable / disable UART interrupts during suspend */
/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
	/* Cache the requested state for omap3_pm_enter() */
	suspend_state = state;
	omap_uart_enable_irqs(0);
	return 0;
}
589
590 static void omap3_pm_end(void)
591 {
592 suspend_state = PM_SUSPEND_ON;
593 omap_uart_enable_irqs(1);
594 return;
595 }
596
/* Platform suspend callbacks registered with the PM core in omap3_pm_init() */
static struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.prepare	= omap3_pm_prepare,
	.enter		= omap3_pm_enter,
	.finish		= omap3_pm_finish,
	.valid		= suspend_valid_only_mem,
};
605 #endif /* CONFIG_SUSPEND */
606
607
608 /**
609 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
610 * retention
611 *
612 * In cases where IVA2 is activated by bootcode, it may prevent
613 * full-chip retention or off-mode because it is not idle. This
614 * function forces the IVA2 into idle state so it can go
615 * into retention/off and thus allow full-chip retention/off.
616 *
617 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 (all three reset lines) */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			 OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' so it parks after release */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2 */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}
654
/*
 * Quiesce the die-to-die (D2D / stacked-modem) interface so it does
 * not block retention/off on standalone (modem-less) OMAP3430 parts.
 */
static void __init omap3_d2d_idle(void)
{
	u16 mask, padconf;

	/*
	 * On a standalone OMAP3430 with no stacked modem, the D2D Idle
	 * Ack and D2D MStandby pads must be pulled high: set
	 * CONTROL_PADCONF_SAD2D_IDLEACK and CONTROL_PADCONF_SAD2D_MSTDBY
	 * to have a pull-up.
	 */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

	/* reset modem: pulse both modem reset lines, then release */
	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
			  CORE_MOD, OMAP2_RM_RSTCTRL);
	prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}
678
/*
 * One-time PRCM register setup at PM init: clear wakeup dependencies,
 * enable interface-clock and DPLL autoidle everywhere, and program the
 * default wakeup sources and MPU group selections.  Several pieces are
 * marked XXX because they belong in the clock/powerdomain frameworks
 * long-term.
 */
static void __init prcm_setup_regs(void)
{
	/* UART4 exists only on 3630; use empty masks elsewhere */
	u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_AUTO_UART4_MASK : 0;
	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_EN_UART4_MASK : 0;
	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_GRPSEL_UART4_MASK : 0;


	/* XXX Reset all wkdeps. This should be done when initializing
	 * powerdomains */
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
	prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
		prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
	} else
		prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

	/*
	 * Enable interface clock autoidle for all modules.
	 * Note that in the long run this should be done by clockfw
	 */
	cm_write_mod_reg(
		OMAP3430_AUTO_MODEM_MASK |
		OMAP3430ES2_AUTO_MMC3_MASK |
		OMAP3430ES2_AUTO_ICR_MASK |
		OMAP3430_AUTO_AES2_MASK |
		OMAP3430_AUTO_SHA12_MASK |
		OMAP3430_AUTO_DES2_MASK |
		OMAP3430_AUTO_MMC2_MASK |
		OMAP3430_AUTO_MMC1_MASK |
		OMAP3430_AUTO_MSPRO_MASK |
		OMAP3430_AUTO_HDQ_MASK |
		OMAP3430_AUTO_MCSPI4_MASK |
		OMAP3430_AUTO_MCSPI3_MASK |
		OMAP3430_AUTO_MCSPI2_MASK |
		OMAP3430_AUTO_MCSPI1_MASK |
		OMAP3430_AUTO_I2C3_MASK |
		OMAP3430_AUTO_I2C2_MASK |
		OMAP3430_AUTO_I2C1_MASK |
		OMAP3430_AUTO_UART2_MASK |
		OMAP3430_AUTO_UART1_MASK |
		OMAP3430_AUTO_GPT11_MASK |
		OMAP3430_AUTO_GPT10_MASK |
		OMAP3430_AUTO_MCBSP5_MASK |
		OMAP3430_AUTO_MCBSP1_MASK |
		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
		OMAP3430_AUTO_MAILBOXES_MASK |
		OMAP3430_AUTO_OMAPCTRL_MASK |
		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
		OMAP3430_AUTO_HSOTGUSB_MASK |
		OMAP3430_AUTO_SAD2D_MASK |
		OMAP3430_AUTO_SSI_MASK,
		CORE_MOD, CM_AUTOIDLE1);

	cm_write_mod_reg(
		OMAP3430_AUTO_PKA_MASK |
		OMAP3430_AUTO_AES1_MASK |
		OMAP3430_AUTO_RNG_MASK |
		OMAP3430_AUTO_SHA11_MASK |
		OMAP3430_AUTO_DES1_MASK,
		CORE_MOD, CM_AUTOIDLE2);

	/* AUTOIDLE3 bank only exists on ES2+ */
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430_AUTO_MAD2D_MASK |
			OMAP3430ES2_AUTO_USBTLL_MASK,
			CORE_MOD, CM_AUTOIDLE3);
	}

	cm_write_mod_reg(
		OMAP3430_AUTO_WDT2_MASK |
		OMAP3430_AUTO_WDT1_MASK |
		OMAP3430_AUTO_GPIO1_MASK |
		OMAP3430_AUTO_32KSYNC_MASK |
		OMAP3430_AUTO_GPT12_MASK |
		OMAP3430_AUTO_GPT1_MASK,
		WKUP_MOD, CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_DSS_MASK,
		OMAP3430_DSS_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_CAM_MASK,
		OMAP3430_CAM_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		omap3630_auto_uart4_mask |
		OMAP3430_AUTO_GPIO6_MASK |
		OMAP3430_AUTO_GPIO5_MASK |
		OMAP3430_AUTO_GPIO4_MASK |
		OMAP3430_AUTO_GPIO3_MASK |
		OMAP3430_AUTO_GPIO2_MASK |
		OMAP3430_AUTO_WDT3_MASK |
		OMAP3430_AUTO_UART3_MASK |
		OMAP3430_AUTO_GPT9_MASK |
		OMAP3430_AUTO_GPT8_MASK |
		OMAP3430_AUTO_GPT7_MASK |
		OMAP3430_AUTO_GPT6_MASK |
		OMAP3430_AUTO_GPT5_MASK |
		OMAP3430_AUTO_GPT4_MASK |
		OMAP3430_AUTO_GPT3_MASK |
		OMAP3430_AUTO_GPT2_MASK |
		OMAP3430_AUTO_MCBSP4_MASK |
		OMAP3430_AUTO_MCBSP3_MASK |
		OMAP3430_AUTO_MCBSP2_MASK,
		OMAP3430_PER_MOD,
		CM_AUTOIDLE);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430ES2_AUTO_USBHOST_MASK,
			OMAP3430ES2_USBHOST_MOD,
			CM_AUTOIDLE);
	}

	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Set all plls to autoidle. This is needed until autoidle is
	 * enabled by clockfw
	 */
	cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
			 MPU_MOD,
			 CM_AUTOIDLE2);
	cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
			 PLL_MOD,
			 CM_AUTOIDLE);
	cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
			 PLL_MOD,
			 CM_AUTOIDLE2);

	/*
	 * Enable control of external oscillator through
	 * sys_clkreq. In the long run clock framework should
	 * take care of this.
	 */
	prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
			     OMAP3430_GR_MOD,
			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* setup wakeup source */
	prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
			  WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
			  OMAP3430_GRPSEL_GPT1_MASK |
			  OMAP3430_GRPSEL_GPT12_MASK,
			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
	/* For some reason IO doesn't generate wakeup event even if
	 * it is selected to mpu wakeup group */
	prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

	/* Enable PM_WKEN to support DSS LPR */
	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
			  OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	prm_write_mod_reg(omap3630_en_uart4_mask |
			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
			  OMAP3430_EN_MCBSP4_MASK,
			  OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	prm_write_mod_reg(omap3630_grpsel_uart4_mask |
			  OMAP3430_GRPSEL_GPIO2_MASK |
			  OMAP3430_GRPSEL_GPIO3_MASK |
			  OMAP3430_GRPSEL_GPIO4_MASK |
			  OMAP3430_GRPSEL_GPIO5_MASK |
			  OMAP3430_GRPSEL_GPIO6_MASK |
			  OMAP3430_GRPSEL_UART3_MASK |
			  OMAP3430_GRPSEL_MCBSP2_MASK |
			  OMAP3430_GRPSEL_MCBSP3_MASK |
			  OMAP3430_GRPSEL_MCBSP4_MASK,
			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags */
	prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}
893
894 void omap3_pm_off_mode_enable(int enable)
895 {
896 struct power_state *pwrst;
897 u32 state;
898
899 if (enable)
900 state = PWRDM_POWER_OFF;
901 else
902 state = PWRDM_POWER_RET;
903
904 #ifdef CONFIG_CPU_IDLE
905 omap3_cpuidle_update_states();
906 #endif
907
908 list_for_each_entry(pwrst, &pwrst_list, node) {
909 pwrst->next_state = state;
910 omap_set_pwrdm_state(pwrst->pwrdm, state);
911 }
912 }
913
914 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
915 {
916 struct power_state *pwrst;
917
918 list_for_each_entry(pwrst, &pwrst_list, node) {
919 if (pwrst->pwrdm == pwrdm)
920 return pwrst->next_state;
921 }
922 return -EINVAL;
923 }
924
925 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
926 {
927 struct power_state *pwrst;
928
929 list_for_each_entry(pwrst, &pwrst_list, node) {
930 if (pwrst->pwrdm == pwrdm) {
931 pwrst->next_state = state;
932 return 0;
933 }
934 }
935 return -EINVAL;
936 }
937
938 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
939 {
940 struct power_state *pwrst;
941
942 if (!pwrdm->pwrsts)
943 return 0;
944
945 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
946 if (!pwrst)
947 return -ENOMEM;
948 pwrst->pwrdm = pwrdm;
949 pwrst->next_state = PWRDM_POWER_RET;
950 list_add(&pwrst->node, &pwrst_list);
951
952 if (pwrdm_has_hdwr_sar(pwrdm))
953 pwrdm_enable_hdwr_sar(pwrdm);
954
955 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
956 }
957
958 /*
959 * Enable hw supervised mode for all clockdomains if it's
960 * supported. Initiate sleep transition for other clockdomains, if
961 * they are not used
962 */
963 static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
964 {
965 if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
966 omap2_clkdm_allow_idle(clkdm);
967 else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
968 atomic_read(&clkdm->usecount) == 0)
969 omap2_clkdm_sleep(clkdm);
970 return 0;
971 }
972
/*
 * Copy the low-level CPU suspend code (and, on HS/EMU devices, the
 * secure-RAM save stub) into SRAM and remember their entry points.
 */
void omap_push_sram_idle(void)
{
	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
					omap34xx_cpu_suspend_sz);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}
981
982 static int __init omap3_pm_init(void)
983 {
984 struct power_state *pwrst, *tmp;
985 struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
986 int ret;
987
988 if (!cpu_is_omap34xx())
989 return -ENODEV;
990
991 printk(KERN_ERR "Power Management for TI OMAP3.\n");
992
993 /* XXX prcm_setup_regs needs to be before enabling hw
994 * supervised mode for powerdomains */
995 prcm_setup_regs();
996
997 ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
998 (irq_handler_t)prcm_interrupt_handler,
999 IRQF_DISABLED, "prcm", NULL);
1000 if (ret) {
1001 printk(KERN_ERR "request_irq failed to register for 0x%x\n",
1002 INT_34XX_PRCM_MPU_IRQ);
1003 goto err1;
1004 }
1005
1006 ret = pwrdm_for_each(pwrdms_setup, NULL);
1007 if (ret) {
1008 printk(KERN_ERR "Failed to setup powerdomains\n");
1009 goto err2;
1010 }
1011
1012 (void) clkdm_for_each(clkdms_setup, NULL);
1013
1014 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
1015 if (mpu_pwrdm == NULL) {
1016 printk(KERN_ERR "Failed to get mpu_pwrdm\n");
1017 goto err2;
1018 }
1019
1020 neon_pwrdm = pwrdm_lookup("neon_pwrdm");
1021 per_pwrdm = pwrdm_lookup("per_pwrdm");
1022 core_pwrdm = pwrdm_lookup("core_pwrdm");
1023 cam_pwrdm = pwrdm_lookup("cam_pwrdm");
1024
1025 neon_clkdm = clkdm_lookup("neon_clkdm");
1026 mpu_clkdm = clkdm_lookup("mpu_clkdm");
1027 per_clkdm = clkdm_lookup("per_clkdm");
1028 core_clkdm = clkdm_lookup("core_clkdm");
1029
1030 omap_push_sram_idle();
1031 #ifdef CONFIG_SUSPEND
1032 suspend_set_ops(&omap_pm_ops);
1033 #endif /* CONFIG_SUSPEND */
1034
1035 pm_idle = omap3_pm_idle;
1036 omap3_idle_init();
1037
1038 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
1039 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
1040 omap3_secure_ram_storage =
1041 kmalloc(0x803F, GFP_KERNEL);
1042 if (!omap3_secure_ram_storage)
1043 printk(KERN_ERR "Memory allocation failed when"
1044 "allocating for secure sram context\n");
1045
1046 local_irq_disable();
1047 local_fiq_disable();
1048
1049 omap_dma_global_context_save();
1050 omap3_save_secure_ram_context(PWRDM_POWER_ON);
1051 omap_dma_global_context_restore();
1052
1053 local_irq_enable();
1054 local_fiq_enable();
1055 }
1056
1057 omap3_save_scratchpad_contents();
1058 err1:
1059 return ret;
1060 err2:
1061 free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
1062 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
1063 list_del(&pwrst->node);
1064 kfree(pwrst);
1065 }
1066 return ret;
1067 }
1068
1069 late_initcall(omap3_pm_init);