/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/traps.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
                           struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
        do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
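
/*
 * For example, DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
 * expands to an asmlinkage handler do_illegal_slot_inst() that forwards trap
 * number 13 and SIGILL to do_unhandled_exception().
 */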

/* Serialises die() output; must be initialised before first use. */
DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s: %lx\n", str, (err & 0xffffff));
        show_regs(regs);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

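/*
 * Before declaring a kernel-mode fault fatal, consult the exception table:
 * the uaccess primitives record a fixup address for each potentially
 * faulting instruction, so a fault inside e.g. __get_user() resumes at the
 * fixup rather than killing the kernel.
 */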
78 | static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | |
79 | { | |
80 | if (!user_mode(regs)) { | |
81 | const struct exception_table_entry *fixup; | |
82 | fixup = search_exception_tables(regs->pc); | |
83 | if (fixup) { | |
84 | regs->pc = fixup->fixup; | |
85 | return; | |
86 | } | |
87 | die(str, regs, err); | |
88 | } | |
89 | } | |
90 | ||
91 | DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current) | |
92 | DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current) | |
93 | ||

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(7, SIGSEGV, "address error(load)",
                                "do_address_error_load",
                                error_code, regs, current);
        }
        return;
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(8, SIGSEGV, "address error(store)",
                                "do_address_error_store",
                                error_code, regs, current);
        }
        return;
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0   /* never a valid encoding */
#define OPCODE_USER_VALID   1   /* valid in user and privileged mode */
#define OPCODE_PRIV_VALID   2   /* valid in privileged mode only */

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
        0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
        0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
        0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
        0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
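
/*
 * The lookup therefore reduces to
 *     state = (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
 * with major = opcode[31:26] and minor = opcode[19:16], exactly as computed
 * in do_reserved_inst() below.
 */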

void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
        /* Workaround SH5-101 cut2 silicon defect #2815 :
           in some situations, inter-mode branches from SHcompact -> SHmedia
           which should take ITLBMISS or EXECPROT exceptions at the target
           falsely take RESINST at the target instead. */

        unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
        unsigned long pc, aligned_pc;
        int get_user_error;
        int trapnr = 12;
        int signr = SIGILL;
        char *exception_name = "reserved_instruction";

        pc = regs->pc;
        if ((pc & 3) == 1) {
                /* SHmedia : check for defect.  This requires executable vmas
                   to be readable too. */
                aligned_pc = pc & ~3;
                if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                        get_user_error = -EFAULT;
                } else {
                        get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                }
                if (get_user_error >= 0) {
                        unsigned long index, shift;
                        unsigned long major, minor, combined;
                        unsigned long reserved_field;
                        reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
                        major = (opcode >> 26) & 0x3f;
                        minor = (opcode >> 16) & 0xf;
                        combined = (major << 4) | minor;
                        index = major;
                        shift = minor << 1;
                        if (reserved_field == 0) {
                                int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
                                switch (opcode_state) {
                                        case OPCODE_INVALID:
                                                /* Trap. */
                                                break;
                                        case OPCODE_USER_VALID:
                                                /* Restart the instruction : the branch to the instruction will now be from an RTE
                                                   not from SHcompact so the silicon defect won't be triggered. */
                                                return;
                                        case OPCODE_PRIV_VALID:
                                                if (!user_mode(regs)) {
                                                        /* Should only ever get here if a module has
                                                           SHcompact code inside it.  If so, the same fix up is needed. */
                                                        return; /* same reason */
                                                }
                                                /* Otherwise, user mode trying to execute a privileged instruction -
                                                   fall through to trap. */
                                                break;
                                        case OPCODE_CTRL_REG:
                                                /* If in privileged mode, return as above. */
                                                if (!user_mode(regs)) return;
                                                /* In user mode ... */
                                                if (combined == 0x9f) { /* GETCON */
                                                        unsigned long regno = (opcode >> 20) & 0x3f;
                                                        if (regno >= 62) {
                                                                return;
                                                        }
                                                        /* Otherwise, reserved or privileged control register, => trap */
                                                } else if (combined == 0x1bf) { /* PUTCON */
                                                        unsigned long regno = (opcode >> 4) & 0x3f;
                                                        if (regno >= 62) {
                                                                return;
                                                        }
                                                        /* Otherwise, reserved or privileged control register, => trap */
                                                } else {
                                                        /* Trap */
                                                }
                                                break;
                                        default:
                                                /* Fall through to trap. */
                                                break;
                                }
                        }
                        /* fall through to normal resinst processing */
                } else {
                        /* Error trying to read opcode.  This typically means a
                           real fault, not a RESINST any more.  So change the
                           codes. */
                        trapnr = 87;
                        exception_name = "address error (exec)";
                        signr = SIGSEGV;
                }
        }

        do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
        PLS();
        show_excp_regs(__FUNCTION__, -1, -1, regs);
        die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
        /* Syscall debug */
        printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

        die_if_kernel("unknown trapa", regs, scId);

        return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
        extern void sh64_unwind(struct pt_regs *regs);
        struct pt_regs *regs;

        regs = tsk ? tsk->thread.kregs : NULL;

        sh64_unwind(regs);
#else
        printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
        show_stack(NULL, sp);
}

void dump_stack(void)
{
        show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

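/*
 * Common tail for all unhandled traps: record the trap in the task struct,
 * signal the offender if the fault was in user mode, then either apply an
 * exception table fixup or die if it was in the kernel.
 */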
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
        show_excp_regs(fn_name, trapnr, signr, regs);
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs))
                force_sig(signr, tsk);

        die_if_no_fixup(str, regs, error_code);
}

static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
        int get_user_error;
        unsigned long aligned_pc;
        unsigned long opcode;

        if ((pc & 3) == 1) {
                /* SHmedia */
                aligned_pc = pc & ~3;
                if (from_user_mode) {
                        if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                                get_user_error = -EFAULT;
                        } else {
                                get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                                *result_opcode = opcode;
                        }
                        return get_user_error;
                } else {
                        /* If the fault was in the kernel, we can either read
                         * this directly, or if not, we fault.
                         */
                        *result_opcode = *(unsigned long *) aligned_pc;
                        return 0;
                }
        } else if ((pc & 1) == 0) {
                /* SHcompact */
                /* TODO : provide handling for this.  We don't really support
                   user-mode SHcompact yet, and for a kernel fault, this would
                   have to come from a module built for SHcompact. */
                return -EFAULT;
        } else {
                /* misaligned */
                return -EFAULT;
        }
}

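/*
 * With NEFF == 32, only the low 32 bits of an effective address are
 * significant and bits 63:32 must mirror bit 31.  For example,
 * 0xffffffff80000000 and 0x000000007fffffff pass the check below, while
 * 0x0000000180000000 does not.
 */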
static int address_is_sign_extended(__u64 a)
{
        __u64 b;
#if (NEFF == 32)
        b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
        return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

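/*
 * Reconstruct the effective address of a misaligned access from the opcode:
 * either base register + sign-extended 10-bit displacement (scaled by the
 * access width) or base register + index register, matching the two SHmedia
 * load/store forms dispatched in misaligned_fixup() below.
 */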
static int generate_and_check_address(struct pt_regs *regs,
                                __u32 opcode,
                                int displacement_not_indexed,
                                int width_shift,
                                __u64 *address)
{
        /* return -1 for fault, 0 for OK */

        __u64 base_address, addr;
        int basereg;

        basereg = (opcode >> 20) & 0x3f;
        base_address = regs->regs[basereg];
        if (displacement_not_indexed) {
                __s64 displacement;
                displacement = (opcode >> 10) & 0x3ff;
                displacement = ((displacement << 54) >> 54); /* sign extend the 10-bit field */
                addr = (__u64)((__s64)base_address + (displacement << width_shift));
        } else {
                __u64 offset;
                int offsetreg;
                offsetreg = (opcode >> 10) & 0x3f;
                offset = regs->regs[offsetreg];
                addr = base_address + offset;
        }

        /* Check sign extended */
        if (!address_is_sign_extended(addr)) {
                return -1;
        }

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        /* Check accessible.  For misaligned access in the kernel, assume the
           address is always accessible (and if not, just fault when the
           load/store gets done.) */
        if (user_mode(regs)) {
                if (addr >= TASK_SIZE) {
                        return -1;
                }
                /* Do access_ok check later - it depends on whether it's a load or a store. */
        }
#endif

        *address = addr;
        return 0;
}

/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;
        q[0] = p[0];
        q[1] = p[1];

        if (do_sign_extend) {
                *result = (__u64)(__s64) *(short *) &x;
        } else {
                *result = (__u64) x;
        }
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;

        x = (__u16) value;
        p[0] = q[0];
        p[1] = q[1];
}

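/*
 * width_shift encodes the access size as log2(bytes): 1 = 16-bit, 2 = 32-bit,
 * 3 = 64-bit.  User-mode accesses are bounced through a local buffer with
 * __copy_user(); kernel-mode accesses use the ldlo/ldhi (and stlo/sthi)
 * instruction pairs, which tolerate unaligned addresses.
 */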
static int misaligned_load(struct pt_regs *regs,
                        __u32 opcode,
                        int displacement_not_indexed,
                        int width_shift,
                        int do_sign_extend)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                switch (width_shift) {
                        case 1:
                                if (do_sign_extend) {
                                        regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
                                } else {
                                        regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
                                }
                                break;
                        case 2:
                                regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
                                break;
                        case 3:
                                regs->regs[destreg] = buffer;
                                break;
                        default:
                                printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }
        } else
#endif
        {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 lo, hi;

                switch (width_shift) {
                        case 1:
                                misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
                                break;
                        case 2:
                                asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
                                asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
                                regs->regs[destreg] = lo | hi;
                                break;
                        case 3:
                                asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
                                asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
                                regs->regs[destreg] = lo | hi;
                                break;

                        default:
                                printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }
        }

        return 0;

}

static int misaligned_store(struct pt_regs *regs,
                        __u32 opcode,
                        int displacement_not_indexed,
                        int width_shift)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                switch (width_shift) {
                        case 1:
                                *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
                                break;
                        case 2:
                                *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
                                break;
                        case 3:
                                buffer = regs->regs[srcreg];
                                break;
                        default:
                                printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }

                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
        } else
#endif
        {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 val = regs->regs[srcreg];

                switch (width_shift) {
                        case 1:
                                misaligned_kernel_word_store(address, val);
                                break;
                        case 2:
                                asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
                                asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
                                break;
                        case 3:
                                asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
                                asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
                                break;

                        default:
                                printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }
        }

        return 0;

}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
                        __u32 opcode,
                        int displacement_not_indexed,
                        int width_shift,
                        int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                __u32 buflo, bufhi;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        grab_fpu();
                        fpsave(&current->thread.fpu.hard);
                        release_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                buflo = *(__u32*) &buffer;
                bufhi = *(1 + (__u32*) &buffer);

                switch (width_shift) {
                        case 2:
                                current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                break;
                        case 3:
                                if (do_paired_load) {
                                        current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                        current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
                                } else {
#if defined(CONFIG_LITTLE_ENDIAN)
                                        current->thread.fpu.hard.fp_regs[destreg] = bufhi;
                                        current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
                                        current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                        current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
                                }
                                break;
                        default:
                                printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }
                return 0;
        } else {
                die ("Misaligned FPU load inside kernel", regs, 0);
                return -1;
        }


}

static int misaligned_fpu_store(struct pt_regs *regs,
                        __u32 opcode,
                        int displacement_not_indexed,
                        int width_shift,
                        int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                /* Initialise these to NaNs. */
                __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        grab_fpu();
                        fpsave(&current->thread.fpu.hard);
                        release_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                switch (width_shift) {
                        case 2:
                                buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                break;
                        case 3:
                                if (do_paired_load) {
                                        buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                        bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
                                } else {
#if defined(CONFIG_LITTLE_ENDIAN)
                                        bufhi = current->thread.fpu.hard.fp_regs[srcreg];
                                        buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
                                        buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                        bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
                                }
                                break;
                        default:
                                printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
                                        width_shift, (unsigned long) regs->pc);
                                break;
                }

                *(__u32*) &buffer = buflo;
                *(1 + (__u32*) &buffer) = bufhi;
                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                return 0;
        } else {
                die ("Misaligned FPU store inside kernel", regs, 0);
                return -1;
        }
}
#endif

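/*
 * Top-level dispatcher for misaligned accesses: fetch the faulting opcode,
 * emit a rate-limited report, then decode the major opcode (bits 31:26,
 * written below as the pre-shift byte value) and, for the indexed forms, the
 * minor opcode, to select the right emulation routine.
 */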
static int misaligned_fixup(struct pt_regs *regs)
{
        unsigned long opcode;
        int error;
        int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        /* Never fixup user mode misaligned accesses without this option enabled. */
        return -1;
#else
        if (!user_mode_unaligned_fixup_enable) return -1;
#endif

        error = read_opcode(regs->pc, &opcode, user_mode(regs));
        if (error < 0) {
                return error;
        }
        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
                --user_mode_unaligned_fixup_count;
                /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
                printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                        current->comm, current->pid, (__u32)regs->pc, opcode);
        } else
#endif
        if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
                --kernel_mode_unaligned_fixup_count;
                if (in_interrupt()) {
                        printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
                                (__u32)regs->pc, opcode);
                } else {
                        printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                                current->comm, current->pid, (__u32)regs->pc, opcode);
                }
        }


        switch (major) {
                case (0x84>>2): /* LD.W */
                        error = misaligned_load(regs, opcode, 1, 1, 1);
                        break;
                case (0xb0>>2): /* LD.UW */
                        error = misaligned_load(regs, opcode, 1, 1, 0);
                        break;
                case (0x88>>2): /* LD.L */
                        error = misaligned_load(regs, opcode, 1, 2, 1);
                        break;
                case (0x8c>>2): /* LD.Q */
                        error = misaligned_load(regs, opcode, 1, 3, 0);
                        break;

                case (0xa4>>2): /* ST.W */
                        error = misaligned_store(regs, opcode, 1, 1);
                        break;
                case (0xa8>>2): /* ST.L */
                        error = misaligned_store(regs, opcode, 1, 2);
                        break;
                case (0xac>>2): /* ST.Q */
                        error = misaligned_store(regs, opcode, 1, 3);
                        break;

                case (0x40>>2): /* indexed loads */
                        switch (minor) {
                                case 0x1: /* LDX.W */
                                        error = misaligned_load(regs, opcode, 0, 1, 1);
                                        break;
                                case 0x5: /* LDX.UW */
                                        error = misaligned_load(regs, opcode, 0, 1, 0);
                                        break;
                                case 0x2: /* LDX.L */
                                        error = misaligned_load(regs, opcode, 0, 2, 1);
                                        break;
                                case 0x3: /* LDX.Q */
                                        error = misaligned_load(regs, opcode, 0, 3, 0);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

                case (0x60>>2): /* indexed stores */
                        switch (minor) {
                                case 0x1: /* STX.W */
                                        error = misaligned_store(regs, opcode, 0, 1);
                                        break;
                                case 0x2: /* STX.L */
                                        error = misaligned_store(regs, opcode, 0, 2);
                                        break;
                                case 0x3: /* STX.Q */
                                        error = misaligned_store(regs, opcode, 0, 3);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
                case (0x94>>2): /* FLD.S */
                        error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
                        break;
                case (0x98>>2): /* FLD.P */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
                        break;
                case (0x9c>>2): /* FLD.D */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
                        break;
                case (0x1c>>2): /* floating indexed loads */
                        switch (minor) {
                                case 0x8: /* FLDX.S */
                                        error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
                                        break;
                                case 0xd: /* FLDX.P */
                                        error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
                                        break;
                                case 0x9: /* FLDX.D */
                                        error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;
                case (0xb4>>2): /* FST.S */
                        error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
                        break;
                case (0xb8>>2): /* FST.P */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
                        break;
                case (0xbc>>2): /* FST.D */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
                        break;
                case (0x3c>>2): /* floating indexed stores */
                        switch (minor) {
                                case 0x8: /* FSTX.S */
                                        error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
                                        break;
                                case 0xd: /* FSTX.P */
                                        error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
                                        break;
                                case 0x9: /* FSTX.D */
                                        error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;
#endif

                default:
                        /* Fault */
                        error = -1;
                        break;
        }

        if (error < 0) {
                return error;
        } else {
                regs->pc += 4; /* Skip the instruction that's just been emulated */
                return 0;
        }

}

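/*
 * Expose the fixup controls via sysctl, i.e. as
 * /proc/sys/sh64/unaligned_fixup/kernel_reports (and, when user-mode fixup
 * is configured in, user_reports and user_enable as well).
 */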
static ctl_table unaligned_table[] = {
        {1, "kernel_reports", &kernel_mode_unaligned_fixup_count,
                sizeof(int), 0644, NULL, &proc_dointvec},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
        {2, "user_reports", &user_mode_unaligned_fixup_count,
                sizeof(int), 0644, NULL, &proc_dointvec},
        {3, "user_enable", &user_mode_unaligned_fixup_enable,
                sizeof(int), 0644, NULL, &proc_dointvec},
#endif
        {0}
};

static ctl_table unaligned_root[] = {
        {1, "unaligned_fixup", NULL, 0, 0555, unaligned_table},
        {0}
};

static ctl_table sh64_root[] = {
        {1, "sh64", NULL, 0, 0555, unaligned_root},
        {0}
};
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
        sysctl_header = register_sysctl_table(sh64_root, 0);
        return 0;
}

__initcall(init_sysctl);


asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
        u64 peek_real_address_q(u64 addr);
        u64 poke_real_address_q(u64 addr, u64 val);
        unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
        unsigned long long exp_cause;
        /* It's not worth ioremapping the debug module registers for the amount
           of access we make to them - just go direct to their physical
           addresses. */
        exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
        if (exp_cause & ~4) {
                printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
                        (unsigned long)(exp_cause & 0xffffffff));
        }
        show_state();
        /* Clear all DEBUGINT causes */
        poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
961 |