/*
 * etrap.S: Sparc trap window preparation for entry into the
 *          Linux kernel.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

/* Registers to not touch at all. */
#define t_psr      l0 /* Set by caller */
#define t_pc       l1 /* Set by caller */
#define t_npc      l2 /* Set by caller */
#define t_wim      l3 /* Set by caller */
#define t_twinmask l4 /* Set at beginning of this entry routine. */
#define t_kstack   l5 /* Set right before pt_regs frame is built */
#define t_retpc    l6 /* If you change this, change winmacro.h header file */
#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
#define curptr     g6 /* Set after pt_regs frame is built */

        .text
        .align  4

/* SEVEN WINDOW PATCH INSTRUCTIONS */
        .globl  tsetup_7win_patch1, tsetup_7win_patch2
        .globl  tsetup_7win_patch3, tsetup_7win_patch4
        .globl  tsetup_7win_patch5, tsetup_7win_patch6
tsetup_7win_patch1:     sll     %t_wim, 0x6, %t_wim
tsetup_7win_patch2:     and     %g2, 0x7f, %g2
tsetup_7win_patch3:     and     %g2, 0x7f, %g2
tsetup_7win_patch4:     and     %g1, 0x7f, %g1
tsetup_7win_patch5:     sll     %t_wim, 0x6, %t_wim
tsetup_7win_patch6:     and     %g2, 0x7f, %g2
/* END OF PATCH INSTRUCTIONS */
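
/* On CPUs with only seven register windows, boot-time setup code
 * copies the six instructions above over the tsetup_patch1..6 sites
 * below, turning the 8-window %wim rotation (shift by 7, mask 0xff)
 * into its 7-window form (shift by 6, mask 0x7f).
 */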

/* At trap time, interrupts and all generic traps do the
 * following:
 *
 *      rd      %psr, %l0
 *      b       some_handler
 *       rd     %wim, %l3
 *      nop
 *
 * Then, if 'some_handler' needs a trap frame (i.e. it has to call
 * C code and the trap cannot be handled in-window), it invokes the
 * SAVE_ALL macro in entry.S, which does:
 *
 *      sethi   %hi(trap_setup), %l4
 *      jmpl    %l4 + %lo(trap_setup), %l6
 *       nop
 */

/* 2 3 4  window number
 * -----
 * O T S  mnemonic
 *
 * O == Current window before trap
 * T == Window entered when trap occurred
 * S == Window we will need to save if (1<<T) == %wim
 *
 * Before execution gets here, it must be guaranteed that
 * %l0 contains trap time %psr, %l1 and %l2 contain the
 * trap pc and npc, and %l3 contains the trap time %wim.
 */

        .globl  trap_setup, tsetup_patch1, tsetup_patch2
        .globl  tsetup_patch3, tsetup_patch4
        .globl  tsetup_patch5, tsetup_patch6
trap_setup:
        /* Calculate mask of trap window. See if from user
         * or kernel and branch conditionally.
         */
        mov     1, %t_twinmask
        andcc   %t_psr, PSR_PS, %g0             ! fromsupv_p = (psr & PSR_PS)
        be      trap_setup_from_user            ! nope, from user mode
         sll    %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
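        /* Note: sll uses only the low five bits of the shift count,
         * and CWP occupies bits 4:0 of the PSR, so shifting by %t_psr
         * above really computes (1 << CWP), the mask of the trap window.
         */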

        /* From kernel, allocate more kernel stack and
         * build a pt_regs trap frame.
         */
        sub     %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
        STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
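        /* STORE_PT_ALL (asm/winmacro.h) fills the new pt_regs area with
         * the trap-time %psr/%pc/%npc, %y, and the global and in
         * registers, using %g2 as scratch.
         */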

        /* See if we are in the trap window. */
        andcc   %t_twinmask, %t_wim, %g0
        bne     trap_setup_kernel_spill         ! in trap window, clean up
         nop

        /* Trap from kernel with a window available.
         * Just do it...
         */
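        /* %t_retpc was set by the jmpl in the SAVE_ALL macro, so adding
         * 0x8 returns to the instruction following that jmpl and its
         * delay slot.
         */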
        jmpl    %t_retpc + 0x8, %g0             ! return to caller
         mov    %t_kstack, %sp                  ! jump onto new stack

trap_setup_kernel_spill:
        ld      [%curptr + TI_UWINMASK], %g1
        orcc    %g0, %g1, %g0
        bne     trap_setup_user_spill           ! there are some user windows, yuck
        /* Spill from kernel, but only kernel windows, adjust
         * %wim and go.
         */
         srl    %t_wim, 0x1, %g2                ! begin computation of new %wim
tsetup_patch1:
        sll     %t_wim, 0x7, %t_wim             ! patched on 7 window Sparcs
        or      %t_wim, %g2, %g2
tsetup_patch2:
        and     %g2, 0xff, %g2                  ! patched on 7 window Sparcs
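        /* The srl/sll/or above rotate the trap-time %wim right by one,
         * marking the window we are about to save as the new invalid
         * window. In C, roughly:
         *
         *      new_wim = ((wim >> 1) | (wim << (nwindows - 1)))
         *                & ((1 << nwindows) - 1);
         *
         * with nwindows == 8 here, and 6/0x7f patched in on 7-window
         * chips.
         */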

        save    %g0, %g0, %g0
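        /* The save above slides us into the window being spilled;
         * STORE_WINDOW below dumps its locals and ins to that window's
         * own %sp, and the restore afterwards steps back into the trap
         * window.
         */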

        /* Set new %wim value */
        wr      %g2, 0x0, %wim

        /* Save the kernel window onto the corresponding stack. */
        STORE_WINDOW(sp)

        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0             ! return to caller
         mov    %t_kstack, %sp                  ! and onto new kernel stack

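/* Offset from the base of the THREAD_SIZE-aligned kernel stack (where
 * thread_info sits) up to where the initial pt_regs and one stack
 * frame are built.
 */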
#define STACK_OFFSET    (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)

trap_setup_from_user:
        /* We can't use %curptr yet. */
        LOAD_CURRENT(t_kstack, t_twinmask)
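        /* LOAD_CURRENT (asm/winmacro.h) leaves the current thread_info
         * pointer in %t_kstack and clobbers %t_twinmask as scratch,
         * which is why the trap window mask is recomputed just below.
         */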

        sethi   %hi(STACK_OFFSET), %t_twinmask
        or      %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
        add     %t_kstack, %t_twinmask, %t_kstack

        mov     1, %t_twinmask
        sll     %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)

        /* Build pt_regs frame. */
        STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)

#if 0
        /* If we're sure every task_struct is THREAD_SIZE aligned,
           we can speed this up. */
        sethi   %hi(STACK_OFFSET), %curptr
        or      %curptr, %lo(STACK_OFFSET), %curptr
        sub     %t_kstack, %curptr, %curptr
#else
        sethi   %hi(~(THREAD_SIZE - 1)), %curptr
        and     %t_kstack, %curptr, %curptr
#endif
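        /* The live #else arm recovers the thread_info pointer by
         * masking the kernel stack address down to its THREAD_SIZE
         * boundary; thread_info sits at the bottom of the stack
         * allocation.
         */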

        /* Clear current_thread_info->w_saved */
        st      %g0, [%curptr + TI_W_SAVED]

        /* See if we are in the trap window. */
        andcc   %t_twinmask, %t_wim, %g0
        bne     trap_setup_user_spill           ! yep we are
         orn    %g0, %t_twinmask, %g1           ! negate trap win mask into %g1

        /* Trap from user, but not into the invalid window.
         * Calculate new umask. The way this works is,
         * any window from the %wim at trap time until
         * the window right before the one we are in now,
         * is a user window. A diagram:
         *
         *      7 6 5 4 3 2 1 0    window number
         *      ---------------
         *          I L T          mnemonic
         *
         * Window 'I' is the invalid window in our example,
         * window 'L' is the window the user was in when
         * the trap occurred, window T is the trap window
         * we are in now. So therefore, windows 5, 4 and
         * 3 are user windows. The following sequence
         * computes the user winmask to represent this.
         */
        subcc   %t_wim, %t_twinmask, %g2
        bneg,a  1f
         sub    %g2, 0x1, %g2
1:
        andn    %g2, %t_twinmask, %g2
tsetup_patch3:
        and     %g2, 0xff, %g2                  ! patched on 7win Sparcs
        st      %g2, [%curptr + TI_UWINMASK]    ! store new umask
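        /* In C, the computation above is roughly:
         *
         *      umask = wim - twinmask;
         *      if ((int) umask < 0)    // invalid window is below us:
         *              umask -= 1;     // borrow sets the wrapped bits
         *      umask &= ~twinmask;     // trap window itself is not user
         *
         * leaving one bit set for each window still holding live user
         * state (tsetup_patch3 then trims it to the real window count).
         */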

        jmpl    %t_retpc + 0x8, %g0             ! return to caller
         mov    %t_kstack, %sp                  ! and onto kernel stack

trap_setup_user_spill:
        /* A spill occurred from either kernel or user mode
         * and there exist some user windows to deal with.
         * A mask of the currently valid user windows
         * is in %g1 upon entry to here.
         */

tsetup_patch4:
        and     %g1, 0xff, %g1                  ! patched on 7win Sparcs, mask
        srl     %t_wim, 0x1, %g2                ! compute new %wim
tsetup_patch5:
        sll     %t_wim, 0x7, %t_wim             ! patched on 7win Sparcs
        or      %t_wim, %g2, %g2                ! %g2 is new %wim
tsetup_patch6:
        and     %g2, 0xff, %g2                  ! patched on 7win Sparcs
        andn    %g1, %g2, %g1                   ! clear this bit in %g1
        st      %g1, [%curptr + TI_UWINMASK]
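        /* Same rotate-right-by-one of %wim as in the kernel spill path
         * above; the window that just became invalid is also cleared
         * from the user window mask, since it is the one we are about
         * to write out to the user's stack below.
         */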

        save    %g0, %g0, %g0

        wr      %g2, 0x0, %wim

        /* Call MMU-architecture dependent stack checking
         * routine.
         */
        .globl  tsetup_mmu_patchme
tsetup_mmu_patchme:
        b       tsetup_sun4c_stackchk
         andcc  %sp, 0x7, %g0
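        /* This branch is another boot-time patch site: on SRMMU
         * machines it is redirected to tsetup_srmmu_stackchk. Either
         * way, the delay slot computes the doubleword alignment test
         * on the user %sp that both checkers expect.
         */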

/* Architecture specific stack checking routines. When either
 * of these routines is called, the globals are free to use
 * as they have been safely stashed on the new kernel stack
 * pointer. Thus the definition below for simplicity.
 */
#define glob_tmp        g1

tsetup_sun4c_stackchk:
        /* Done by caller: andcc %sp, 0x7, %g0 */
        bne     trap_setup_user_stack_is_bolixed
         sra    %sp, 29, %glob_tmp

        add     %glob_tmp, 0x1, %glob_tmp
        andncc  %glob_tmp, 0x1, %g0
        bne     trap_setup_user_stack_is_bolixed
         and    %sp, 0xfff, %glob_tmp           ! delay slot
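        /* The sra/add/andncc above test for the sun4c VMA hole:
         * %sp >> 29 (sign extended) is 0 in the lowest 512MB of the
         * address space and -1 in the highest 512MB. Adding one and
         * clearing bit zero yields zero exactly in those two cases;
         * anything else means %sp points into the hole.
         */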

        /* See if our dump area will be on more than one
         * page.
         */
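        /* The window dump is 16 words, so %sp + 0x38 is the start of
         * the last doubleword stored; if adding 0x38 to the page
         * offset of %sp carries past bit 11, the dump straddles a page
         * boundary and both pages must be probed.
         */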
        add     %glob_tmp, 0x38, %glob_tmp
        andncc  %glob_tmp, 0xff8, %g0
        be      tsetup_sun4c_onepage            ! only one page to check
         lda    [%sp] ASI_PTE, %glob_tmp        ! have to check first page anyway

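        /* The sun4c PTE is probed via ASI_PTE; its top bits hold the
         * protection, and the value 0x6 in bits 31:29 is what the
         * checks below accept as a valid page that user mode may
         * write, which is what a stack dump needs.
         */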
tsetup_sun4c_twopages:
        /* Is first page ok permission wise? */
        srl     %glob_tmp, 29, %glob_tmp
        cmp     %glob_tmp, 0x6
        bne     trap_setup_user_stack_is_bolixed
         add    %sp, 0x38, %glob_tmp            /* Is second page in vma hole? */

        sra     %glob_tmp, 29, %glob_tmp
        add     %glob_tmp, 0x1, %glob_tmp
        andncc  %glob_tmp, 0x1, %g0
        bne     trap_setup_user_stack_is_bolixed
         add    %sp, 0x38, %glob_tmp

        lda     [%glob_tmp] ASI_PTE, %glob_tmp

tsetup_sun4c_onepage:
        srl     %glob_tmp, 29, %glob_tmp
        cmp     %glob_tmp, 0x6                  ! can user write to it?
        bne     trap_setup_user_stack_is_bolixed ! failure
         nop

        STORE_WINDOW(sp)

        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0
         mov    %t_kstack, %sp

        .globl  tsetup_srmmu_stackchk
tsetup_srmmu_stackchk:
        /* Check results of caller's andcc %sp, 0x7, %g0 */
        bne     trap_setup_user_stack_is_bolixed
         sethi  %hi(PAGE_OFFSET), %glob_tmp

        cmp     %glob_tmp, %sp
        bleu,a  1f
         lda    [%g0] ASI_M_MMUREGS, %glob_tmp  ! read MMU control

trap_setup_user_stack_is_bolixed:
        /* From user/kernel into invalid window w/bad user
         * stack. Save bad user stack, and return to caller.
         */
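        /* SAVE_BOLIXED_USER_STACK (asm/winmacro.h) stashes the window
         * into the thread_info save area and bumps w_saved, so the
         * spill can be replayed once the user stack is usable again.
         */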
        SAVE_BOLIXED_USER_STACK(curptr, g3)
        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0
         mov    %t_kstack, %sp

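        /* With the no_fault bit (bit 1 of the SRMMU control register)
         * set, a store that misses simply records a synchronous fault
         * instead of trapping. Reading SFAR and then SFSR below both
         * samples and clears that state, so the 0x2 test tells us
         * whether the window dump actually reached memory.
         */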
1:
        /* Clear the fault status and turn on the no_fault bit. */
        or      %glob_tmp, 0x2, %glob_tmp       ! or in no_fault bit
        sta     %glob_tmp, [%g0] ASI_M_MMUREGS  ! set it

        /* Dump the registers and cross fingers. */
        STORE_WINDOW(sp)

        /* Clear the no_fault bit and check the status. */
        andn    %glob_tmp, 0x2, %glob_tmp
        sta     %glob_tmp, [%g0] ASI_M_MMUREGS
        mov     AC_M_SFAR, %glob_tmp
        lda     [%glob_tmp] ASI_M_MMUREGS, %g0
        mov     AC_M_SFSR, %glob_tmp
        lda     [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
        andcc   %glob_tmp, 0x2, %g0             ! did we fault?
        bne     trap_setup_user_stack_is_bolixed ! failure
         nop

        restore %g0, %g0, %g0

        jmpl    %t_retpc + 0x8, %g0
         mov    %t_kstack, %sp