/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
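
/*
 * For example (mechanical macro expansion), the first DO_ERROR() use
 * below,
 *
 *	DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
 *
 * expands to:
 *
 *	asmlinkage void do_illegal_slot_inst(unsigned long error_code,
 *					     struct pt_regs *regs)
 *	{
 *		do_unhandled_exception(13, SIGILL, "illegal slot instruction",
 *				"illegal_slot_inst", error_code, regs, current);
 *	}
 *
 * Note the 'tsk' parameter is unused; the body always passes 'current'.
 */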

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
	return;
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
	return;
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
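
/*
 * Illustrative lookup (editor's example, not in the original source):
 * an LD.W instruction has major opcode 0x84>>2 == 0x21, so index ==
 * 0x21 and shmedia_opcode_table[0x21] == 0x55555555.  Whatever the
 * minor field, every bit-pair decodes to OPCODE_USER_VALID (1), so a
 * genuine LD.W hitting the defect is simply restarted below.
 */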

void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	/* Workaround SH5-101 cut2 silicon defect #2815 :
	   in some situations, inter-mode branches from SHcompact -> SHmedia
	   which should take ITLBMISS or EXECPROT exceptions at the target
	   falsely take RESINST at the target instead. */

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to the instruction will now be from an RTE
					   not from SHcompact so the silicon defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only ever get here if a module has
						   SHcompact code inside it.  If so, the same fix up is needed. */
						return; /* same reason */
					}
					/* Otherwise, user mode trying to execute a privileged instruction -
					   fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs))
						return;
					/* In user mode ... */
					if (combined == 0x9f) { /* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else if (combined == 0x1bf) { /* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else {
						/* Trap */
					}
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__func__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(unsigned long *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
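
/*
 * Example values (editor's note): 0x0000000012345678 and
 * 0xffffffff80000000 pass the check; 0x0000000080000000 fails it,
 * since bit 31 is set but bits 63:32 are clear.
 */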

static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
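		/* e.g. a raw field of 0x3ff becomes -1 and 0x1ff stays +511:
		   the shifts park the 10-bit field at bits 63:54 and then
		   arithmetic-shift it back down. */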
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}

/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
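	/* Copy a byte at a time: byte accesses cannot themselves raise a
	   misalignment exception. */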
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
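			/* ldlo/ldhi each fetch the bytes of the longword on
			   their side of the alignment boundary (zeroes
			   elsewhere), so OR-ing the partial results
			   reassembles the value - the SHmedia analogue of
			   lwl/lwr on MIPS. */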
484 | asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address)); | |
485 | asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address)); | |
486 | regs->regs[destreg] = lo | hi; | |
487 | break; | |
488 | case 3: | |
489 | asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address)); | |
490 | asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address)); | |
491 | regs->regs[destreg] = lo | hi; | |
492 | break; | |
493 | ||
494 | default: | |
495 | printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", | |
496 | width_shift, (unsigned long) regs->pc); | |
497 | break; | |
498 | } | |
499 | } | |
500 | ||
501 | return 0; | |
502 | ||
503 | } | |
504 | ||
static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			       __u32 opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current, regs);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32 *) &buffer;
		bufhi = *(1 + (__u32 *) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
				__u32 opcode,
				int displacement_not_indexed,
				int width_shift,
				int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current, regs);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32 *) &buffer = buflo;
		*(1 + (__u32 *) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
#endif

static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup user mode misaligned accesses without this option enabled. */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable)
		return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
				(__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
				current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}

	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	case (0x94>>2): /* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;
	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;
	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
#endif

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

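/*
 * Once registered, these tables surface the fixup knobs as (assuming
 * procfs is mounted in the usual place):
 *
 *	/proc/sys/sh64/unaligned_fixup/kernel_reports
 *	/proc/sys/sh64/unaligned_fixup/user_reports  (user fixup builds only)
 *	/proc/sys/sh64/unaligned_fixup/user_enable   (user fixup builds only)
 */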
static ctl_table unaligned_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#endif
	{}
};

static ctl_table unaligned_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

static ctl_table sh64_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}

__initcall(init_sysctl);

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}