/*
 * powerpc: Unify the 32 and 64 bit idle loops
 * arch/powerpc/kernel/idle_6xx.S
 * (blame-view header: commit 14cf11af, author "PM")
 */
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning ! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * if this is not the case some additional changes will have to
8 * be done to check a runtime var (a bit like powersave-nap)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
b3b8dc6c 18#include <asm/reg.h>
/* blame: commit 14cf11af (PM) */
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
 30 * Init idle, called at early CPU setup time from head.S for each CPU
 31 * Make sure no rest of NAP mode remains in HID0, save default
 32 * values for some CPU specific registers. Called with r24
 33 * containing CPU number and r3 reloc offset
 34 */
/*
 * In:      r24 = logical CPU number, r3 = relocation offset
 * Clobbers r4, r5, r6 and CR0.
 * Side effects: clears HID0[NAP]; on CPUs with the matching features,
 * stores the boot-time MSSCR0 / HID1 values into the per-CPU arrays
 * nap_save_msscr0 / nap_save_hid1 for power_save_6xx_restore to reload.
 */
35_GLOBAL(init_idle_6xx)
36BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP (keep every HID0 bit except bit 9) */
39 mtspr SPRN_HID0, r4
40 b 1f
41END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 /* CPU cannot NAP: nothing to clean up or save, return immediately */
42 blr
431:
 /* r5 = reloc offset + cpu*4 = byte index into the per-CPU save arrays */
44 slwi r5,r24,2
45 add r5,r5,r3
46BEGIN_FTR_SECTION
 /* 745x with the L2-prefetch NAP erratum: save boot-time MSSCR0 */
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51BEGIN_FTR_SECTION
 /* 750FX dual-PLL parts: save boot-time HID1 (PLL selection) */
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58/*
 59 * Here is the power_save_6xx function. This could eventually be
 60 * split into several functions & changing the function pointer
 61 * depending on the various features.
 62 */
/*
 * ppc6xx_idle: enter DOZE or NAP on 6xx/7xxx, or return if neither is
 * available/enabled. Consults cur_cpu_spec->cpu_features and the
 * powersave_nap flag at runtime (NAP can be cleared after fixups).
 * Clobbers r0, r3, r4, r5, r7 (and r6 under DEBUG), CR0.
 * NOTE(review): r7 is or'd with MSR_EE/MSR_POW@h below without a visible
 * mfmsr in this function -- presumably the caller guarantees r7 holds the
 * current MSR value; verify against the call site.
 */
63_GLOBAL(ppc6xx_idle)
 64 /* Check if we can nap or doze, put HID0 mask in r3
 65 */
 66 lis r3, 0
67BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70BEGIN_FTR_SECTION
 71 /* We must dynamically check for the NAP feature as it
 72 * can be cleared by CPU init after the fixups are done
 73 */
 74 lis r4,cur_cpu_spec@ha
 75 lwz r4,cur_cpu_spec@l(r4)
 76 lwz r4,CPU_SPEC_FEATURES(r4)
 77 andi. r0,r4,CPU_FTR_CAN_NAP
 78 beq 1f
 79 /* Now check if user or arch enabled NAP mode */
 80 lis r4,powersave_nap@ha
 81 lwz r4,powersave_nap@l(r4)
 82 cmpwi 0,r4,0
 83 beq 1f
 /* NAP wins over DOZE when both are possible */
 84 lis r3,HID0_NAP@h
 851:
86END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 /* Neither NAP nor DOZE selected: nothing to do, plain return */
 87 cmpwi 0,r3,0
 88 beqlr
 89
14cf11af
PM
 90 /* Some pre-nap cleanups needed on some CPUs */
 91 andis. r0,r3,HID0_NAP@h
 92 beq 2f
93BEGIN_FTR_SECTION
 94 /* Disable L2 prefetch on some 745x and try to ensure
 95 * L2 prefetch engines are idle. As explained by errata
 96 * text, we can't be sure they are, we just hope very hard
 97 * that will be enough (sic !). At least I noticed Apple
 98 * doesn't even bother doing the dcbf's here...
 99 */
100 mfspr r4,SPRN_MSSCR0
 /* clear the two low-order MSSCR0 bits (L2 prefetch enables) */
101 rlwinm r4,r4,0,0,29
102 sync
103 mtspr SPRN_MSSCR0,r4
104 sync
105 isync
106 lis r4,KERNELBASE@h
107 dcbf 0,r4
108 dcbf 0,r4
109 dcbf 0,r4
110 dcbf 0,r4
111END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
112#ifdef DEBUG
 /* count NAP entries (MMU still on here, virtual access is fine) */
113 lis r6,nap_enter_count@ha
114 lwz r4,nap_enter_count@l(r6)
115 addi r4,r4,1
116 stw r4,nap_enter_count@l(r6)
117#endif
1182:
119BEGIN_FTR_SECTION
 120 /* Go to low speed mode on some 750FX */
121 lis r4,powersave_lowspeed@ha
122 lwz r4,powersave_lowspeed@l(r4)
123 cmpwi 0,r4,0
124 beq 1f
 /* HID1 bit 0x0001 (high half) selects the low-speed PLL */
125 mfspr r4,SPRN_HID1
126 oris r4,r4,0x0001
127 mtspr SPRN_HID1,r4
1281:
129END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
 130
 131 /* Go to NAP or DOZE now */
 /* clear any stale NAP/SLEEP/DOZE bits in HID0, then set the chosen one */
132 mfspr r4,SPRN_HID0
133 lis r5,(HID0_NAP|HID0_SLEEP)@h
134BEGIN_FTR_SECTION
135 oris r5,r5,HID0_DOZE@h
136END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
137 andc r4,r4,r5
138 or r4,r4,r3
139BEGIN_FTR_SECTION
140 oris r4,r4,HID0_DPM@h /* that should be done once for all */
141END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
142 mtspr SPRN_HID0,r4
143BEGIN_FTR_SECTION
 /* Altivec: stop all data streams before the core goes to sleep */
144 DSSALL
145 sync
146END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
147 ori r7,r7,MSR_EE /* Could be omitted (already set) */
148 oris r7,r7,MSR_POW@h
 /* Setting MSR[POW] enters the power-save state selected in HID0;
 * the sync/isync pairs bracket the mtmsr as the 6xx/7xxx manuals
 * require when changing POW. Execution resumes at the wakeup vector,
 * which eventually reaches power_save_6xx_restore below. */
149 sync
150 isync
151 mtmsr r7
152 isync
153 sync
154 blr
155
156/*
 157 * Return from NAP/DOZE mode, restore some CPU specific registers,
 158 * we are called with DR/IR still off and r2 containing physical
 159 * address of current.
 160 */
/*
 * Entered on the wakeup path with MMU translation disabled, so all
 * static data must be addressed physically (-KERNELBASE / tophys).
 * Clears NAP (and DOZE where applicable) from HID0, records whether we
 * were napping in cr1.EQ, restores MSSCR0/HID1 from the per-CPU arrays
 * filled by init_idle_6xx, then tail-continues into
 * transfer_to_handler_cont. Clobbers r9, r11, CR0, CR1.
 */
161_GLOBAL(power_save_6xx_restore)
162 mfspr r11,SPRN_HID0
163 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
164 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
165BEGIN_FTR_SECTION
166 rlwinm r11,r11,0,9,7 /* Clear DOZE */
167END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
168 mtspr SPRN_HID0, r11
 169
170#ifdef DEBUG
 /* MMU is off: take the @ha part physically (low 16 bits are unchanged
 * by the -KERNELBASE subtraction, so the plain @l offset still works) */
171 beq cr1,1f
172 lis r11,(nap_return_count-KERNELBASE)@ha
173 lwz r9,nap_return_count@l(r11)
174 addi r9,r9,1
175 stw r9,nap_return_count@l(r11)
1761:
177#endif
 178
 /* r9 = thread_info: r1 masked down to its 8KB stack base, then
 * converted to a physical address; r11 = this CPU's word offset */
179 rlwinm r9,r1,0,0,18
180 tophys(r9,r9)
181 lwz r11,TI_CPU(r9)
182 slwi r11,r11,2
 183 /* Todo: make sure all these save slots are in the same page
 184 * and load the @ha part + CPU offset only once (the old code
 185 * kept it in r22) */
186BEGIN_FTR_SECTION
 /* only re-enable L2 prefetch (MSSCR0) if we actually napped */
187 beq cr1,1f
188 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
189 lwz r9,nap_save_msscr0@l(r9)
190 mtspr SPRN_MSSCR0, r9
191 sync
192 isync
1931:
194END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
195BEGIN_FTR_SECTION
 /* restore the saved PLL selection on dual-PLL 750FX parts */
196 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
197 lwz r9,nap_save_hid1@l(r9)
198 mtspr SPRN_HID1, r9
199END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
200 b transfer_to_handler_cont
201
202 .data
203
204_GLOBAL(nap_save_msscr0)
205 .space 4*NR_CPUS
206
207_GLOBAL(nap_save_hid1)
208 .space 4*NR_CPUS
209
/* blame: commit 14cf11af (PM) */
/*
 * Nonzero => ppc6xx_idle switches the 750FX PLL to low-speed mode
 * before napping (see the CPU_FTR_DUAL_PLL_750FX section above).
 */
_GLOBAL(powersave_lowspeed)
	.long	0

#ifdef DEBUG
/* Instrumentation: counts of NAP entries and NAP wakeups. */
_GLOBAL(nap_enter_count)
	.space	4
_GLOBAL(nap_return_count)
	.space	4
#endif