Merge branch 'upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/linville...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / powerpc / kernel / idle_6xx.S
1 /*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning ! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * if this is not the case some additional changes will have to
8 * be done to check a runtime var (a bit like powersave-nap)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #include <linux/config.h>
17 #include <linux/threads.h>
18 #include <asm/reg.h>
19 #include <asm/page.h>
20 #include <asm/cputable.h>
21 #include <asm/thread_info.h>
22 #include <asm/ppc_asm.h>
23 #include <asm/asm-offsets.h>
24
25 #undef DEBUG
26
27 .text
28
29 /*
30 * Init idle, called at early CPU setup time from head.S for each CPU
31 * Make sure no rest of NAP mode remains in HID0, save default
32 * values for some CPU specific registers. Called with r24
33 * containing CPU number and r3 reloc offset
34 */
35 _GLOBAL(init_idle_6xx)
/*
 * Per-CPU idle init, called early from head.S.
 * In:  r24 = CPU number, r3 = reloc offset (kernel not yet at its
 *      linked address, so symbol accesses below add r3).
 * Clears any stale NAP bit left in HID0, then saves the boot-time
 * MSSCR0/HID1 values into per-CPU slots so power_save_6xx_restore
 * can put them back after a wakeup.  Clobbers r4, r5, r6.
 */
36 BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP */
39 mtspr SPRN_HID0, r4
40 b 1f
41 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
/* CPU cannot nap: nothing to clean up and nothing worth saving */
42 blr
43 1:
/* r5 = reloc offset + CPU# * 4 = adjustment added to each array symbol */
44 slwi r5,r24,2
45 add r5,r5,r3
46 BEGIN_FTR_SECTION
/* 745x with the L2-prefetch nap workaround: save default MSSCR0 */
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51 BEGIN_FTR_SECTION
/* Dual-PLL 750FX: save default HID1 (PLL selection is toggled at idle) */
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58 /*
59 * Here is the power_save_6xx function. This could eventually be
60 * split into several functions & changing the function pointer
61 * depending on the various features.
62 */
63 _GLOBAL(ppc6xx_idle)
/*
 * Enter the deepest power-save state available/enabled on this CPU.
 * Builds an HID0 mode mask in r3 (DOZE and/or NAP), performs the
 * per-CPU pre-sleep workarounds, then sets MSR[POW] to actually stop.
 * Returns normally (blr) if no power-save mode applies.
 * Clobbers r0, r3, r4, r5, r6, r7, cr0.
 */
64 /* Check if we can nap or doze, put HID0 mask in r3
65 */
66 lis r3, 0
67 BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70 BEGIN_FTR_SECTION
71 /* We must dynamically check for the NAP feature as it
72 * can be cleared by CPU init after the fixups are done
73 */
74 lis r4,cur_cpu_spec@ha
75 lwz r4,cur_cpu_spec@l(r4)
76 lwz r4,CPU_SPEC_FEATURES(r4)
77 andi. r0,r4,CPU_FTR_CAN_NAP
78 beq 1f
79 /* Now check if user or arch enabled NAP mode */
80 lis r4,powersave_nap@ha
81 lwz r4,powersave_nap@l(r4)
82 cmpwi 0,r4,0
83 beq 1f
/* NAP wins over DOZE when both are possible */
84 lis r3,HID0_NAP@h
85 1:
86 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
/* r3 == 0 means neither DOZE nor NAP is available/enabled: plain return */
87 cmpwi 0,r3,0
88 beqlr
89 
90 /* Some pre-nap cleanups needed on some CPUs */
91 andis. r0,r3,HID0_NAP@h
92 beq 2f
93 BEGIN_FTR_SECTION
94 /* Disable L2 prefetch on some 745x and try to ensure
95 * L2 prefetch engines are idle. As explained by errata
96 * text, we can't be sure they are, we just hope very hard
97 * that will be enough (sic !). At least I noticed Apple
98 * doesn't even bother doing the dcbf's here...
99 */
100 mfspr r4,SPRN_MSSCR0
/* Clear the low two bits of MSSCR0 (mask keeps bits 0..29) —
 * presumably the L2 prefetch enable field, per the errata note above */
101 rlwinm r4,r4,0,0,29
102 sync
103 mtspr SPRN_MSSCR0,r4
104 sync
105 isync
/* A few cache-block flushes to nudge the prefetch engines idle */
106 lis r4,KERNELBASE@h
107 dcbf 0,r4
108 dcbf 0,r4
109 dcbf 0,r4
110 dcbf 0,r4
111 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
112 #ifdef DEBUG
/* Debug instrumentation: count NAP entries (not atomic, see .data) */
113 lis r6,nap_enter_count@ha
114 lwz r4,nap_enter_count@l(r6)
115 addi r4,r4,1
116 stw r4,nap_enter_count@l(r6)
117 #endif
118 2:
119 BEGIN_FTR_SECTION
120 /* Go to low speed mode on some 750FX */
121 lis r4,powersave_lowspeed@ha
122 lwz r4,powersave_lowspeed@l(r4)
123 cmpwi 0,r4,0
124 beq 1f
125 mfspr r4,SPRN_HID1
/* Set HID1 PLL-select bit: switch to PLL 1, assumed configured for
 * low speed (see warning in the file header) */
126 oris r4,r4,0x0001
127 mtspr SPRN_HID1,r4
128 1:
129 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
130 
131 /* Go to NAP or DOZE now */
132 mfspr r4,SPRN_HID0
/* Clear all sleep-mode bits first, then OR in the one chosen in r3 */
133 lis r5,(HID0_NAP|HID0_SLEEP)@h
134 BEGIN_FTR_SECTION
135 oris r5,r5,HID0_DOZE@h
136 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
137 andc r4,r4,r5
138 or r4,r4,r3
139 BEGIN_FTR_SECTION
140 oris r4,r4,HID0_DPM@h /* that should be done once for all */
141 END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
142 mtspr SPRN_HID0,r4
143 BEGIN_FTR_SECTION
/* AltiVec: stop all data streams before powering down */
144 DSSALL
145 sync
146 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Set MSR[EE] (so an interrupt can wake us) and MSR[POW]; the mtmsr
 * below is what actually enters NAP/DOZE.  The sync/isync fences
 * around it order the HID0 update and the power-down transition. */
147 mfmsr r7
148 ori r7,r7,MSR_EE
149 oris r7,r7,MSR_POW@h
150 sync
151 isync
152 mtmsr r7
153 isync
154 sync
155 blr
156
157 /*
158 * Return from NAP/DOZE mode, restore some CPU specific registers,
159 * we are called with DR/IR still off and r2 containing physical
160 * address of current.
161 */
162 _GLOBAL(power_save_6xx_restore)
/*
 * Wakeup path after NAP/DOZE, reached from exception entry.
 * Called with MSR IR/DR still off (MMU disabled) and r2 holding the
 * physical address of current — hence the -KERNELBASE / tophys()
 * physical addressing below.  Clears the sleep bits from HID0,
 * restores the per-CPU MSSCR0/HID1 saved by init_idle_6xx, then
 * resumes normal exception processing via transfer_to_handler_cont.
 */
163 mfspr r11,SPRN_HID0
164 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
165 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
166 BEGIN_FTR_SECTION
167 rlwinm r11,r11,0,9,7 /* Clear DOZE */
168 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
169 mtspr SPRN_HID0, r11
170 
171 #ifdef DEBUG
/* cr1.eq (set above) gates the NAP-only bookkeeping/restore paths */
172 beq cr1,1f
173 lis r11,(nap_return_count-KERNELBASE)@ha
174 lwz r9,nap_return_count@l(r11)
175 addi r9,r9,1
176 stw r9,nap_return_count@l(r11)
177 1:
178 #endif
179 
/* Round r1 down to an 8 KiB boundary (mask keeps bits 0..18) to reach
 * the thread_info at the base of the kernel stack, then go physical */
180 rlwinm r9,r1,0,0,18
181 tophys(r9,r9)
182 lwz r11,TI_CPU(r9)
/* r11 = CPU# * 4: word offset into the per-CPU save arrays */
183 slwi r11,r11,2
184 /* Todo make sure all these are in the same page
185 * and load r22 (@ha part + CPU offset) only once
186 */
187 BEGIN_FTR_SECTION
188 beq cr1,1f
/* Undo the 745x pre-nap L2 prefetch disable: restore saved MSSCR0 */
189 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
190 lwz r9,nap_save_msscr0@l(r9)
191 mtspr SPRN_MSSCR0, r9
192 sync
193 isync
194 1:
195 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
196 BEGIN_FTR_SECTION
/* 750FX: restore saved HID1 (switches back off the low-speed PLL) */
197 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
198 lwz r9,nap_save_hid1@l(r9)
199 mtspr SPRN_HID1, r9
200 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
201 b transfer_to_handler_cont
202
203 .data
204 
/* Per-CPU (one word each) boot-time MSSCR0 values, written by
 * init_idle_6xx and restored by power_save_6xx_restore on 745x */
205 _GLOBAL(nap_save_msscr0)
206 .space 4*NR_CPUS
207 
/* Per-CPU boot-time HID1 values, used on dual-PLL 750FX parts */
208 _GLOBAL(nap_save_hid1)
209 .space 4*NR_CPUS
210 
/* Flag read by ppc6xx_idle: non-zero selects the 750FX low-speed PLL
 * while napping (set externally; defaults to off) */
211 _GLOBAL(powersave_lowspeed)
212 .long 0
213 
214 #ifdef DEBUG
/* Debug counters, bumped without locking and shared by all CPUs */
215 _GLOBAL(nap_enter_count)
216 .space 4
217 _GLOBAL(nap_return_count)
218 .space 4
219 #endif