[PATCH] swsusp: clean up suspend header
arch/powerpc/kernel/swsusp_32.S
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>


/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
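/* r12..r31 is 20 registers x 4 bytes = 80 (0x50) bytes,
 * so SL_SIZE works out to 0x74 + 0x50 = 0xc4 */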

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
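	/* save the callee-saved GPRs r12..r31 (20 words) with a single stmw */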
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
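	/* (TBU is read before and after TBL; if the two reads differ,
	 * TBL carried into TBU in between and the pair is re-read) */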
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU configuration registers */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load a pointer to the list of pages to copy (pagedir_nosave) */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
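	/* CTR = 256; each iteration of the loop below moves 16 bytes,
	 * so one pass copies 256 x 16 = 4096 bytes, i.e. one 4K page */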
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
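	/* advance to the next page backup entry; a NULL link ends the list */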
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
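	/* CTR = 0x20000 loads, 32 bytes apart: walks a 4MB range,
	 * presumably to displace the previous L1 contents */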
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
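	/* dcbf over the same 4MB range writes the lines back to memory
	 * and invalidates them */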
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration registers */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values back into the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs */
	lis	r4,0x1000
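	/* r4 starts at 0x10000000; tlbie one EA per 4K page down to 0,
	 * a 256MB range that should hit every TLB congruence class */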
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
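	/* (TBL is zeroed first so no carry into TBU can occur while the
	 * two halves are being written) */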
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0
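	/* (a value of 1 triggers an almost immediate decrementer
	 * interrupt, letting the timer code resynchronize) */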

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
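	/* SRR0 <- return address (caller's LR), SRR1 <- target MSR;
	 * rfi then returns and switches the MSR atomically, turning
	 * address translation back on */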
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
348