arch/mips/kernel/unaligned.c
1 /*
2 * Handle unaligned accesses by emulation.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 *
11 * This file contains the exception handler for the address error exception,
12 * with the special capability to execute faulting instructions in software.
13 * The handler does not try to handle the case where the program counter
14 * itself points to an address that is not aligned on a word boundary.
15 *
16 * Putting data at unaligned addresses is bad practice even on Intel, where
17 * only performance suffers. Much worse is that such code is not
18 * portable. Because several programs die on MIPS due to alignment
19 * problems, I decided to implement this handler anyway, though I originally
20 * didn't intend to do this at all for user code.
21 *
22 * For now I enable fixing of address errors by default to make life easier.
23 * However, I intend to disable this at some point in the future, once the
24 * alignment problems in user programs have been fixed. For programmers this
25 * is the right way to go.
26 *
27 * Fixing address errors is a per process option. The option is inherited
28 * across fork(2) and execve(2) calls. If you really want to use the
29 * option in your user programs - I discourage the use of the software
30 * emulation strongly - use the following code in your userland stuff:
31 *
32 * #include <sys/sysmips.h>
33 *
34 * ...
35 * sysmips(MIPS_FIXADE, x);
36 * ...
37 *
38 * An argument x of 0 disables software emulation; any other value enables it.
39 *
40 * Below is a little program to play around with this feature.
41 *
42 * #include <stdio.h>
43 * #include <stdlib.h>
44 * #include <sys/sysmips.h>
45 *
46 * struct foo {
47 * unsigned char bar[8];
48 * };
49 *
50 * int main(int argc, char *argv[])
51 * {
52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53 * unsigned int *p = (unsigned int *) (x.bar + 3);
54 * int i;
55 *
56 * if (argc > 1)
57 * sysmips(MIPS_FIXADE, atoi(argv[1]));
58 *
59 * printf("*p = %08x\n", *p);
60 * *p = 0xdeadface;
61 *
62 * for (i = 0; i <= 7; i++)
63 * printf("%02x ", x.bar[i]);
64 * printf("\n");
65 * }
66 *
67 * Coprocessor loads are not supported; I think this case is unimportant
68 * in practice.
69 *
70 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
71 * exception for the R6000.
72 * A store crossing a page boundary might be executed only partially.
73 * Undo the partial store in this case.
74 */
75 #include <linux/mm.h>
76 #include <linux/signal.h>
77 #include <linux/smp.h>
78 #include <linux/sched.h>
79 #include <linux/debugfs.h>
80 #include <linux/perf_event.h>
81
82 #include <asm/asm.h>
83 #include <asm/branch.h>
84 #include <asm/byteorder.h>
85 #include <asm/cop2.h>
86 #include <asm/fpu.h>
87 #include <asm/fpu_emulator.h>
88 #include <asm/inst.h>
89 #include <asm/uaccess.h>
92
93 #define STR(x) __STR(x)
94 #define __STR(x) #x
95
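/*
 * What do_ade() below does with an unaligned access, selectable at run
 * time through debugfs when CONFIG_DEBUG_FS is enabled:
 *
 * UNALIGNED_ACTION_QUIET - fix the access up silently (the default)
 * UNALIGNED_ACTION_SIGNAL - don't fix it up, deliver SIGBUS instead
 * UNALIGNED_ACTION_SHOW - fix it up, but dump the registers first
 */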
96 enum {
97 UNALIGNED_ACTION_QUIET,
98 UNALIGNED_ACTION_SIGNAL,
99 UNALIGNED_ACTION_SHOW,
100 };
101 #ifdef CONFIG_DEBUG_FS
102 static u32 unaligned_instructions;
103 static u32 unaligned_action;
104 #else
105 #define unaligned_action UNALIGNED_ACTION_QUIET
106 #endif
107 extern void show_registers(struct pt_regs *regs);
108
109 #ifdef __BIG_ENDIAN
110 #define LoadHW(addr, value, res) \
111 __asm__ __volatile__ (".set\tnoat\n" \
112 "1:\tlb\t%0, 0(%2)\n" \
113 "2:\tlbu\t$1, 1(%2)\n\t" \
114 "sll\t%0, 0x8\n\t" \
115 "or\t%0, $1\n\t" \
116 "li\t%1, 0\n" \
117 "3:\t.set\tat\n\t" \
118 ".insn\n\t" \
119 ".section\t.fixup,\"ax\"\n\t" \
120 "4:\tli\t%1, %3\n\t" \
121 "j\t3b\n\t" \
122 ".previous\n\t" \
123 ".section\t__ex_table,\"a\"\n\t" \
124 STR(PTR)"\t1b, 4b\n\t" \
125 STR(PTR)"\t2b, 4b\n\t" \
126 ".previous" \
127 : "=&r" (value), "=r" (res) \
128 : "r" (addr), "i" (-EFAULT));
129
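/*
 * The access macros here (LoadHW above, LoadW/.../StoreDW below) perform
 * the unaligned access piecemeal - byte loads/stores or lwl/lwr-style
 * pairs - and hook every access into the kernel exception table, so a
 * fault on a bad address branches to the local fixup code and merely
 * reports -EFAULT in 'res' instead of oopsing. Roughly, the big-endian
 * LoadHW above behaves like this C sketch (illustrative only, not what
 * actually runs):
 *
 * signed char hi;
 * unsigned char lo;
 *
 * if (__get_user(hi, (signed char __user *)addr) ||
 *     __get_user(lo, (unsigned char __user *)addr + 1))
 *         res = -EFAULT;
 * else {
 *         value = ((int)hi << 8) | lo;
 *         res = 0;
 * }
 */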
130 #define LoadW(addr, value, res) \
131 __asm__ __volatile__ ( \
132 "1:\tlwl\t%0, (%2)\n" \
133 "2:\tlwr\t%0, 3(%2)\n\t" \
134 "li\t%1, 0\n" \
135 "3:\n\t" \
136 ".insn\n\t" \
137 ".section\t.fixup,\"ax\"\n\t" \
138 "4:\tli\t%1, %3\n\t" \
139 "j\t3b\n\t" \
140 ".previous\n\t" \
141 ".section\t__ex_table,\"a\"\n\t" \
142 STR(PTR)"\t1b, 4b\n\t" \
143 STR(PTR)"\t2b, 4b\n\t" \
144 ".previous" \
145 : "=&r" (value), "=r" (res) \
146 : "r" (addr), "i" (-EFAULT));
147
148 #define LoadHWU(addr, value, res) \
149 __asm__ __volatile__ ( \
150 ".set\tnoat\n" \
151 "1:\tlbu\t%0, 0(%2)\n" \
152 "2:\tlbu\t$1, 1(%2)\n\t" \
153 "sll\t%0, 0x8\n\t" \
154 "or\t%0, $1\n\t" \
155 "li\t%1, 0\n" \
156 "3:\n\t" \
157 ".insn\n\t" \
158 ".set\tat\n\t" \
159 ".section\t.fixup,\"ax\"\n\t" \
160 "4:\tli\t%1, %3\n\t" \
161 "j\t3b\n\t" \
162 ".previous\n\t" \
163 ".section\t__ex_table,\"a\"\n\t" \
164 STR(PTR)"\t1b, 4b\n\t" \
165 STR(PTR)"\t2b, 4b\n\t" \
166 ".previous" \
167 : "=&r" (value), "=r" (res) \
168 : "r" (addr), "i" (-EFAULT));
169
170 #define LoadWU(addr, value, res) \
171 __asm__ __volatile__ ( \
172 "1:\tlwl\t%0, (%2)\n" \
173 "2:\tlwr\t%0, 3(%2)\n\t" \
174 "dsll\t%0, %0, 32\n\t" \
175 "dsrl\t%0, %0, 32\n\t" \
176 "li\t%1, 0\n" \
177 "3:\n\t" \
178 ".insn\n\t" \
179 "\t.section\t.fixup,\"ax\"\n\t" \
180 "4:\tli\t%1, %3\n\t" \
181 "j\t3b\n\t" \
182 ".previous\n\t" \
183 ".section\t__ex_table,\"a\"\n\t" \
184 STR(PTR)"\t1b, 4b\n\t" \
185 STR(PTR)"\t2b, 4b\n\t" \
186 ".previous" \
187 : "=&r" (value), "=r" (res) \
188 : "r" (addr), "i" (-EFAULT));
189
190 #define LoadDW(addr, value, res) \
191 __asm__ __volatile__ ( \
192 "1:\tldl\t%0, (%2)\n" \
193 "2:\tldr\t%0, 7(%2)\n\t" \
194 "li\t%1, 0\n" \
195 "3:\n\t" \
196 ".insn\n\t" \
197 "\t.section\t.fixup,\"ax\"\n\t" \
198 "4:\tli\t%1, %3\n\t" \
199 "j\t3b\n\t" \
200 ".previous\n\t" \
201 ".section\t__ex_table,\"a\"\n\t" \
202 STR(PTR)"\t1b, 4b\n\t" \
203 STR(PTR)"\t2b, 4b\n\t" \
204 ".previous" \
205 : "=&r" (value), "=r" (res) \
206 : "r" (addr), "i" (-EFAULT));
207
208 #define StoreHW(addr, value, res) \
209 __asm__ __volatile__ ( \
210 ".set\tnoat\n" \
211 "1:\tsb\t%1, 1(%2)\n\t" \
212 "srl\t$1, %1, 0x8\n" \
213 "2:\tsb\t$1, 0(%2)\n\t" \
214 ".set\tat\n\t" \
215 "li\t%0, 0\n" \
216 "3:\n\t" \
217 ".insn\n\t" \
218 ".section\t.fixup,\"ax\"\n\t" \
219 "4:\tli\t%0, %3\n\t" \
220 "j\t3b\n\t" \
221 ".previous\n\t" \
222 ".section\t__ex_table,\"a\"\n\t" \
223 STR(PTR)"\t1b, 4b\n\t" \
224 STR(PTR)"\t2b, 4b\n\t" \
225 ".previous" \
226 : "=r" (res) \
227 : "r" (value), "r" (addr), "i" (-EFAULT));
228
229 #define StoreW(addr, value, res) \
230 __asm__ __volatile__ ( \
231 "1:\tswl\t%1,(%2)\n" \
232 "2:\tswr\t%1, 3(%2)\n\t" \
233 "li\t%0, 0\n" \
234 "3:\n\t" \
235 ".insn\n\t" \
236 ".section\t.fixup,\"ax\"\n\t" \
237 "4:\tli\t%0, %3\n\t" \
238 "j\t3b\n\t" \
239 ".previous\n\t" \
240 ".section\t__ex_table,\"a\"\n\t" \
241 STR(PTR)"\t1b, 4b\n\t" \
242 STR(PTR)"\t2b, 4b\n\t" \
243 ".previous" \
244 : "=r" (res) \
245 : "r" (value), "r" (addr), "i" (-EFAULT));
246
247 #define StoreDW(addr, value, res) \
248 __asm__ __volatile__ ( \
249 "1:\tsdl\t%1,(%2)\n" \
250 "2:\tsdr\t%1, 7(%2)\n\t" \
251 "li\t%0, 0\n" \
252 "3:\n\t" \
253 ".insn\n\t" \
254 ".section\t.fixup,\"ax\"\n\t" \
255 "4:\tli\t%0, %3\n\t" \
256 "j\t3b\n\t" \
257 ".previous\n\t" \
258 ".section\t__ex_table,\"a\"\n\t" \
259 STR(PTR)"\t1b, 4b\n\t" \
260 STR(PTR)"\t2b, 4b\n\t" \
261 ".previous" \
262 : "=r" (res) \
263 : "r" (value), "r" (addr), "i" (-EFAULT));
264 #endif
265
266 #ifdef __LITTLE_ENDIAN
267 #define LoadHW(addr, value, res) \
268 __asm__ __volatile__ (".set\tnoat\n" \
269 "1:\tlb\t%0, 1(%2)\n" \
270 "2:\tlbu\t$1, 0(%2)\n\t" \
271 "sll\t%0, 0x8\n\t" \
272 "or\t%0, $1\n\t" \
273 "li\t%1, 0\n" \
274 "3:\t.set\tat\n\t" \
275 ".insn\n\t" \
276 ".section\t.fixup,\"ax\"\n\t" \
277 "4:\tli\t%1, %3\n\t" \
278 "j\t3b\n\t" \
279 ".previous\n\t" \
280 ".section\t__ex_table,\"a\"\n\t" \
281 STR(PTR)"\t1b, 4b\n\t" \
282 STR(PTR)"\t2b, 4b\n\t" \
283 ".previous" \
284 : "=&r" (value), "=r" (res) \
285 : "r" (addr), "i" (-EFAULT));
286
287 #define LoadW(addr, value, res) \
288 __asm__ __volatile__ ( \
289 "1:\tlwl\t%0, 3(%2)\n" \
290 "2:\tlwr\t%0, (%2)\n\t" \
291 "li\t%1, 0\n" \
292 "3:\n\t" \
293 ".insn\n\t" \
294 ".section\t.fixup,\"ax\"\n\t" \
295 "4:\tli\t%1, %3\n\t" \
296 "j\t3b\n\t" \
297 ".previous\n\t" \
298 ".section\t__ex_table,\"a\"\n\t" \
299 STR(PTR)"\t1b, 4b\n\t" \
300 STR(PTR)"\t2b, 4b\n\t" \
301 ".previous" \
302 : "=&r" (value), "=r" (res) \
303 : "r" (addr), "i" (-EFAULT));
304
305 #define LoadHWU(addr, value, res) \
306 __asm__ __volatile__ ( \
307 ".set\tnoat\n" \
308 "1:\tlbu\t%0, 1(%2)\n" \
309 "2:\tlbu\t$1, 0(%2)\n\t" \
310 "sll\t%0, 0x8\n\t" \
311 "or\t%0, $1\n\t" \
312 "li\t%1, 0\n" \
313 "3:\n\t" \
314 ".insn\n\t" \
315 ".set\tat\n\t" \
316 ".section\t.fixup,\"ax\"\n\t" \
317 "4:\tli\t%1, %3\n\t" \
318 "j\t3b\n\t" \
319 ".previous\n\t" \
320 ".section\t__ex_table,\"a\"\n\t" \
321 STR(PTR)"\t1b, 4b\n\t" \
322 STR(PTR)"\t2b, 4b\n\t" \
323 ".previous" \
324 : "=&r" (value), "=r" (res) \
325 : "r" (addr), "i" (-EFAULT));
326
327 #define LoadWU(addr, value, res) \
328 __asm__ __volatile__ ( \
329 "1:\tlwl\t%0, 3(%2)\n" \
330 "2:\tlwr\t%0, (%2)\n\t" \
331 "dsll\t%0, %0, 32\n\t" \
332 "dsrl\t%0, %0, 32\n\t" \
333 "li\t%1, 0\n" \
334 "3:\n\t" \
335 ".insn\n\t" \
336 "\t.section\t.fixup,\"ax\"\n\t" \
337 "4:\tli\t%1, %3\n\t" \
338 "j\t3b\n\t" \
339 ".previous\n\t" \
340 ".section\t__ex_table,\"a\"\n\t" \
341 STR(PTR)"\t1b, 4b\n\t" \
342 STR(PTR)"\t2b, 4b\n\t" \
343 ".previous" \
344 : "=&r" (value), "=r" (res) \
345 : "r" (addr), "i" (-EFAULT));
346
347 #define LoadDW(addr, value, res) \
348 __asm__ __volatile__ ( \
349 "1:\tldl\t%0, 7(%2)\n" \
350 "2:\tldr\t%0, (%2)\n\t" \
351 "li\t%1, 0\n" \
352 "3:\n\t" \
353 ".insn\n\t" \
354 "\t.section\t.fixup,\"ax\"\n\t" \
355 "4:\tli\t%1, %3\n\t" \
356 "j\t3b\n\t" \
357 ".previous\n\t" \
358 ".section\t__ex_table,\"a\"\n\t" \
359 STR(PTR)"\t1b, 4b\n\t" \
360 STR(PTR)"\t2b, 4b\n\t" \
361 ".previous" \
362 : "=&r" (value), "=r" (res) \
363 : "r" (addr), "i" (-EFAULT));
364
365 #define StoreHW(addr, value, res) \
366 __asm__ __volatile__ ( \
367 ".set\tnoat\n" \
368 "1:\tsb\t%1, 0(%2)\n\t" \
369 "srl\t$1,%1, 0x8\n" \
370 "2:\tsb\t$1, 1(%2)\n\t" \
371 ".set\tat\n\t" \
372 "li\t%0, 0\n" \
373 "3:\n\t" \
374 ".insn\n\t" \
375 ".section\t.fixup,\"ax\"\n\t" \
376 "4:\tli\t%0, %3\n\t" \
377 "j\t3b\n\t" \
378 ".previous\n\t" \
379 ".section\t__ex_table,\"a\"\n\t" \
380 STR(PTR)"\t1b, 4b\n\t" \
381 STR(PTR)"\t2b, 4b\n\t" \
382 ".previous" \
383 : "=r" (res) \
384 : "r" (value), "r" (addr), "i" (-EFAULT));
385
386 #define StoreW(addr, value, res) \
387 __asm__ __volatile__ ( \
388 "1:\tswl\t%1, 3(%2)\n" \
389 "2:\tswr\t%1, (%2)\n\t" \
390 "li\t%0, 0\n" \
391 "3:\n\t" \
392 ".insn\n\t" \
393 ".section\t.fixup,\"ax\"\n\t" \
394 "4:\tli\t%0, %3\n\t" \
395 "j\t3b\n\t" \
396 ".previous\n\t" \
397 ".section\t__ex_table,\"a\"\n\t" \
398 STR(PTR)"\t1b, 4b\n\t" \
399 STR(PTR)"\t2b, 4b\n\t" \
400 ".previous" \
401 : "=r" (res) \
402 : "r" (value), "r" (addr), "i" (-EFAULT));
403
404 #define StoreDW(addr, value, res) \
405 __asm__ __volatile__ ( \
406 "1:\tsdl\t%1, 7(%2)\n" \
407 "2:\tsdr\t%1, (%2)\n\t" \
408 "li\t%0, 0\n" \
409 "3:\n\t" \
410 ".insn\n\t" \
411 ".section\t.fixup,\"ax\"\n\t" \
412 "4:\tli\t%0, %3\n\t" \
413 "j\t3b\n\t" \
414 ".previous\n\t" \
415 ".section\t__ex_table,\"a\"\n\t" \
416 STR(PTR)"\t1b, 4b\n\t" \
417 STR(PTR)"\t2b, 4b\n\t" \
418 ".previous" \
419 : "=r" (res) \
420 : "r" (value), "r" (addr), "i" (-EFAULT));
421 #endif
422
423 static void emulate_load_store_insn(struct pt_regs *regs,
424 void __user *addr, unsigned int __user *pc)
425 {
426 union mips_instruction insn;
427 unsigned long value;
428 unsigned int res;
429 unsigned long origpc;
430 unsigned long orig31;
431 void __user *fault_addr = NULL;
432
433 origpc = (unsigned long)pc;
434 orig31 = regs->regs[31];
435
436 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
437
438 /*
439 * This load never faults.
440 */
441 __get_user(insn.word, pc);
442
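/*
 * Example of what the switch below sees, assuming the faulting
 * instruction was "lw $t0, 3($a0)", i.e. insn.word == 0x8c880003:
 *
 * insn.i_format.opcode == 0x23 (lw_op)
 * insn.i_format.rs == 4 ($a0, the base register)
 * insn.i_format.rt == 8 ($t0, the destination)
 * insn.i_format.simmediate == 3 (the offset)
 *
 * regs->regs[insn.i_format.rt] is then the register to update once the
 * emulated load has succeeded.
 */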
443 switch (insn.i_format.opcode) {
444 /*
445 * These are instructions that a compiler doesn't generate. We
446 * can assume therefore that the code is MIPS-aware and
447 * really buggy. Emulating these instructions would break the
448 * semantics anyway.
449 */
450 case ll_op:
451 case lld_op:
452 case sc_op:
453 case scd_op:
454
455 /*
456 * For these instructions the only way to create an address
457 * error is an attempted access to kernel/supervisor address
458 * space.
459 */
460 case ldl_op:
461 case ldr_op:
462 case lwl_op:
463 case lwr_op:
464 case sdl_op:
465 case sdr_op:
466 case swl_op:
467 case swr_op:
468 case lb_op:
469 case lbu_op:
470 case sb_op:
471 goto sigbus;
472
473 /*
474 * The remaining opcodes are the ones that are really of
475 * interest.
476 */
477 case lh_op:
478 if (!access_ok(VERIFY_READ, addr, 2))
479 goto sigbus;
480
481 LoadHW(addr, value, res);
482 if (res)
483 goto fault;
484 compute_return_epc(regs);
485 regs->regs[insn.i_format.rt] = value;
486 break;
487
488 case lw_op:
489 if (!access_ok(VERIFY_READ, addr, 4))
490 goto sigbus;
491
492 LoadW(addr, value, res);
493 if (res)
494 goto fault;
495 compute_return_epc(regs);
496 regs->regs[insn.i_format.rt] = value;
497 break;
498
499 case lhu_op:
500 if (!access_ok(VERIFY_READ, addr, 2))
501 goto sigbus;
502
503 LoadHWU(addr, value, res);
504 if (res)
505 goto fault;
506 compute_return_epc(regs);
507 regs->regs[insn.i_format.rt] = value;
508 break;
509
510 case lwu_op:
511 #ifdef CONFIG_64BIT
512 /*
513 * A 32-bit kernel might be running on a 64-bit processor. But
514 * if we're on a 32-bit processor and an i-cache incoherency
515 * or race makes us see a 64-bit instruction here the sdl/sdr
516 * would blow up, so for now we don't handle unaligned 64-bit
517 * instructions on 32-bit kernels.
518 */
519 if (!access_ok(VERIFY_READ, addr, 4))
520 goto sigbus;
521
522 LoadWU(addr, value, res);
523 if (res)
524 goto fault;
525 compute_return_epc(regs);
526 regs->regs[insn.i_format.rt] = value;
527 break;
528 #endif /* CONFIG_64BIT */
529
530 /* Cannot handle 64-bit instructions in 32-bit kernel */
531 goto sigill;
532
533 case ld_op:
534 #ifdef CONFIG_64BIT
535 /*
536 * A 32-bit kernel might be running on a 64-bit processor. But
537 * if we're on a 32-bit processor and an i-cache incoherency
538 * or race makes us see a 64-bit instruction here the sdl/sdr
539 * would blow up, so for now we don't handle unaligned 64-bit
540 * instructions on 32-bit kernels.
541 */
542 if (!access_ok(VERIFY_READ, addr, 8))
543 goto sigbus;
544
545 LoadDW(addr, value, res);
546 if (res)
547 goto fault;
548 compute_return_epc(regs);
549 regs->regs[insn.i_format.rt] = value;
550 break;
551 #endif /* CONFIG_64BIT */
552
553 /* Cannot handle 64-bit instructions in 32-bit kernel */
554 goto sigill;
555
556 case sh_op:
557 if (!access_ok(VERIFY_WRITE, addr, 2))
558 goto sigbus;
559
560 compute_return_epc(regs);
561 value = regs->regs[insn.i_format.rt];
562 StoreHW(addr, value, res);
563 if (res)
564 goto fault;
565 break;
566
567 case sw_op:
568 if (!access_ok(VERIFY_WRITE, addr, 4))
569 goto sigbus;
570
571 compute_return_epc(regs);
572 value = regs->regs[insn.i_format.rt];
573 StoreW(addr, value, res);
574 if (res)
575 goto fault;
576 break;
577
578 case sd_op:
579 #ifdef CONFIG_64BIT
580 /*
581 * A 32-bit kernel might be running on a 64-bit processor. But
582 * if we're on a 32-bit processor and an i-cache incoherency
583 * or race makes us see a 64-bit instruction here the sdl/sdr
584 * would blow up, so for now we don't handle unaligned 64-bit
585 * instructions on 32-bit kernels.
586 */
587 if (!access_ok(VERIFY_WRITE, addr, 8))
588 goto sigbus;
589
590 compute_return_epc(regs);
591 value = regs->regs[insn.i_format.rt];
592 StoreDW(addr, value, res);
593 if (res)
594 goto fault;
595 break;
596 #endif /* CONFIG_64BIT */
597
598 /* Cannot handle 64-bit instructions in 32-bit kernel */
599 goto sigill;
600
601 case lwc1_op:
602 case ldc1_op:
603 case swc1_op:
604 case sdc1_op:
605 die_if_kernel("Unaligned FP access in kernel code", regs);
606 BUG_ON(!used_math());
607
608 lose_fpu(1); /* Save FPU state for the emulator. */
609 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
610 &fault_addr);
611 own_fpu(1); /* Restore FPU state. */
612
613 /* Signal if something went wrong. */
614 process_fpemu_return(res, fault_addr);
615
616 if (res == 0)
617 break;
618 return;
619
620 /*
621 * COP2 is available to the implementor for application-specific use.
622 * It's up to applications to register a notifier chain and do
623 * whatever they have to do, including possibly sending signals.
624 */
625 case lwc2_op:
626 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
627 break;
628
629 case ldc2_op:
630 cu2_notifier_call_chain(CU2_LDC2_OP, regs);
631 break;
632
633 case swc2_op:
634 cu2_notifier_call_chain(CU2_SWC2_OP, regs);
635 break;
636
637 case sdc2_op:
638 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
639 break;
640
641 default:
642 /*
643 * Pheeee... We encountered an as yet unknown instruction or a
644 * cache coherence problem. Die, sucker, die ...
645 */
646 goto sigill;
647 }
648
649 #ifdef CONFIG_DEBUG_FS
650 unaligned_instructions++;
651 #endif
652
653 return;
654
655 fault:
656 /* roll back jump/branch */
657 regs->cp0_epc = origpc;
658 regs->regs[31] = orig31;
659 /* Did we have an exception handler installed? */
660 if (fixup_exception(regs))
661 return;
662
663 die_if_kernel("Unhandled kernel unaligned access", regs);
664 force_sig(SIGSEGV, current);
665
666 return;
667
668 sigbus:
669 die_if_kernel("Unhandled kernel unaligned access", regs);
670 force_sig(SIGBUS, current);
671
672 return;
673
674 sigill:
675 die_if_kernel
676 ("Unhandled kernel unaligned access or invalid instruction", regs);
677 force_sig(SIGILL, current);
678 }
679
680 /* Recode table from 16-bit register notation to 32-bit GPR. */
681 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
682
683 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
684 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
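/*
 * Example: a 3-bit register field of 2 names $2 (v0) in either table,
 * while a field of 0 names $16 (s0) for loads but $0 (zero) for the
 * store encodings that use reg16to32st.
 */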
685
686 void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
687 {
688 unsigned long value;
689 unsigned int res;
690 int i;
691 unsigned int reg = 0, rvar;
692 unsigned long orig31;
693 u16 __user *pc16;
694 u16 halfword;
695 unsigned int word;
696 unsigned long origpc, contpc;
697 union mips_instruction insn;
698 struct mm_decoded_insn mminsn;
699 void __user *fault_addr = NULL;
700
701 origpc = regs->cp0_epc;
702 orig31 = regs->regs[31];
703
704 mminsn.micro_mips_mode = 1;
705
706 /*
707 * This load never faults.
708 */
709 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
710 __get_user(halfword, pc16);
711 pc16++;
712 contpc = regs->cp0_epc + 2;
713 word = ((unsigned int)halfword << 16);
714 mminsn.pc_inc = 2;
715
716 if (!mm_insn_16bit(halfword)) {
717 __get_user(halfword, pc16);
718 pc16++;
719 contpc = regs->cp0_epc + 4;
720 mminsn.pc_inc = 4;
721 word |= halfword;
722 }
723 mminsn.insn = word;
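/*
 * At this point mminsn.insn holds the faulting instruction with the
 * first (major opcode) halfword in bits 31..16. For a 16-bit
 * instruction the low half stays zero; for a 32-bit one the second
 * halfword has been merged into bits 15..0, e.g. halfwords 0x1234 and
 * 0x5678 (values purely illustrative) yield mminsn.insn == 0x12345678.
 */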
724
725 if (get_user(halfword, pc16))
726 goto fault;
727 mminsn.next_pc_inc = 2;
728 word = ((unsigned int)halfword << 16);
729
730 if (!mm_insn_16bit(halfword)) {
731 pc16++;
732 if (get_user(halfword, pc16))
733 goto fault;
734 mminsn.next_pc_inc = 4;
735 word |= halfword;
736 }
737 mminsn.next_insn = word;
738
739 insn = (union mips_instruction)(mminsn.insn);
740 if (mm_isBranchInstr(regs, mminsn, &contpc))
741 insn = (union mips_instruction)(mminsn.next_insn);
742
743 /* Parse instruction to find what to do */
744
745 switch (insn.mm_i_format.opcode) {
746
747 case mm_pool32a_op:
748 switch (insn.mm_x_format.func) {
749 case mm_lwxs_op:
750 reg = insn.mm_x_format.rd;
751 goto loadW;
752 }
753
754 goto sigbus;
755
756 case mm_pool32b_op:
757 switch (insn.mm_m_format.func) {
758 case mm_lwp_func:
759 reg = insn.mm_m_format.rd;
760 if (reg == 31)
761 goto sigbus;
762
763 if (!access_ok(VERIFY_READ, addr, 8))
764 goto sigbus;
765
766 LoadW(addr, value, res);
767 if (res)
768 goto fault;
769 regs->regs[reg] = value;
770 addr += 4;
771 LoadW(addr, value, res);
772 if (res)
773 goto fault;
774 regs->regs[reg + 1] = value;
775 goto success;
776
777 case mm_swp_func:
778 reg = insn.mm_m_format.rd;
779 if (reg == 31)
780 goto sigbus;
781
782 if (!access_ok(VERIFY_WRITE, addr, 8))
783 goto sigbus;
784
785 value = regs->regs[reg];
786 StoreW(addr, value, res);
787 if (res)
788 goto fault;
789 addr += 4;
790 value = regs->regs[reg + 1];
791 StoreW(addr, value, res);
792 if (res)
793 goto fault;
794 goto success;
795
796 case mm_ldp_func:
797 #ifdef CONFIG_64BIT
798 reg = insn.mm_m_format.rd;
799 if (reg == 31)
800 goto sigbus;
801
802 if (!access_ok(VERIFY_READ, addr, 16))
803 goto sigbus;
804
805 LoadDW(addr, value, res);
806 if (res)
807 goto fault;
808 regs->regs[reg] = value;
809 addr += 8;
810 LoadDW(addr, value, res);
811 if (res)
812 goto fault;
813 regs->regs[reg + 1] = value;
814 goto success;
815 #endif /* CONFIG_64BIT */
816
817 goto sigill;
818
819 case mm_sdp_func:
820 #ifdef CONFIG_64BIT
821 reg = insn.mm_m_format.rd;
822 if (reg == 31)
823 goto sigbus;
824
825 if (!access_ok(VERIFY_WRITE, addr, 16))
826 goto sigbus;
827
828 value = regs->regs[reg];
829 StoreDW(addr, value, res);
830 if (res)
831 goto fault;
832 addr += 8;
833 value = regs->regs[reg + 1];
834 StoreDW(addr, value, res);
835 if (res)
836 goto fault;
837 goto success;
838 #endif /* CONFIG_64BIT */
839
840 goto sigill;
841
842 case mm_lwm32_func:
843 reg = insn.mm_m_format.rd;
844 rvar = reg & 0xf;
845 if ((rvar > 9) || !reg)
846 goto sigill;
847 if (reg & 0x10) {
848 if (!access_ok
849 (VERIFY_READ, addr, 4 * (rvar + 1)))
850 goto sigbus;
851 } else {
852 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
853 goto sigbus;
854 }
855 if (rvar == 9)
856 rvar = 8;
857 for (i = 16; rvar; rvar--, i++) {
858 LoadW(addr, value, res);
859 if (res)
860 goto fault;
861 addr += 4;
862 regs->regs[i] = value;
863 }
864 if ((reg & 0xf) == 9) {
865 LoadW(addr, value, res);
866 if (res)
867 goto fault;
868 addr += 4;
869 regs->regs[30] = value;
870 }
871 if (reg & 0x10) {
872 LoadW(addr, value, res);
873 if (res)
874 goto fault;
875 regs->regs[31] = value;
876 }
877 goto success;
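/*
 * Worked example for the LWM32 case above, derived purely from the
 * code (register numbers illustrative): an rd field of 0x13 gives
 * rvar == 3, so $16-$18 are loaded from addr, addr+4 and addr+8, and
 * bit 4 of the field being set then also loads $31 from the next word.
 */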
878
879 case mm_swm32_func:
880 reg = insn.mm_m_format.rd;
881 rvar = reg & 0xf;
882 if ((rvar > 9) || !reg)
883 goto sigill;
884 if (reg & 0x10) {
885 if (!access_ok
886 (VERIFY_WRITE, addr, 4 * (rvar + 1)))
887 goto sigbus;
888 } else {
889 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
890 goto sigbus;
891 }
892 if (rvar == 9)
893 rvar = 8;
894 for (i = 16; rvar; rvar--, i++) {
895 value = regs->regs[i];
896 StoreW(addr, value, res);
897 if (res)
898 goto fault;
899 addr += 4;
900 }
901 if ((reg & 0xf) == 9) {
902 value = regs->regs[30];
903 StoreW(addr, value, res);
904 if (res)
905 goto fault;
906 addr += 4;
907 }
908 if (reg & 0x10) {
909 value = regs->regs[31];
910 StoreW(addr, value, res);
911 if (res)
912 goto fault;
913 }
914 goto success;
915
916 case mm_ldm_func:
917 #ifdef CONFIG_64BIT
918 reg = insn.mm_m_format.rd;
919 rvar = reg & 0xf;
920 if ((rvar > 9) || !reg)
921 goto sigill;
922 if (reg & 0x10) {
923 if (!access_ok
924 (VERIFY_READ, addr, 8 * (rvar + 1)))
925 goto sigbus;
926 } else {
927 if (!access_ok(VERIFY_READ, addr, 8 * rvar))
928 goto sigbus;
929 }
930 if (rvar == 9)
931 rvar = 8;
932
933 for (i = 16; rvar; rvar--, i++) {
934 LoadDW(addr, value, res);
935 if (res)
936 goto fault;
937 addr += 4;
938 regs->regs[i] = value;
939 }
940 if ((reg & 0xf) == 9) {
941 LoadDW(addr, value, res);
942 if (res)
943 goto fault;
944 addr += 8;
945 regs->regs[30] = value;
946 }
947 if (reg & 0x10) {
948 LoadDW(addr, value, res);
949 if (res)
950 goto fault;
951 regs->regs[31] = value;
952 }
953 goto success;
954 #endif /* CONFIG_64BIT */
955
956 goto sigill;
957
958 case mm_sdm_func:
959 #ifdef CONFIG_64BIT
960 reg = insn.mm_m_format.rd;
961 rvar = reg & 0xf;
962 if ((rvar > 9) || !reg)
963 goto sigill;
964 if (reg & 0x10) {
965 if (!access_ok
966 (VERIFY_WRITE, addr, 8 * (rvar + 1)))
967 goto sigbus;
968 } else {
969 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
970 goto sigbus;
971 }
972 if (rvar == 9)
973 rvar = 8;
974
975 for (i = 16; rvar; rvar--, i++) {
976 value = regs->regs[i];
977 StoreDW(addr, value, res);
978 if (res)
979 goto fault;
980 addr += 8;
981 }
982 if ((reg & 0xf) == 9) {
983 value = regs->regs[30];
984 StoreDW(addr, value, res);
985 if (res)
986 goto fault;
987 addr += 8;
988 }
989 if (reg & 0x10) {
990 value = regs->regs[31];
991 StoreDW(addr, value, res);
992 if (res)
993 goto fault;
994 }
995 goto success;
996 #endif /* CONFIG_64BIT */
997
998 goto sigill;
999
1000 /* LWC2, SWC2, LDC2, SDC2 are not serviced */
1001 }
1002
1003 goto sigbus;
1004
1005 case mm_pool32c_op:
1006 switch (insn.mm_m_format.func) {
1007 case mm_lwu_func:
1008 reg = insn.mm_m_format.rd;
1009 goto loadWU;
1010 }
1011
1012 /* LL,SC,LLD,SCD are not serviced */
1013 goto sigbus;
1014
1015 case mm_pool32f_op:
1016 switch (insn.mm_x_format.func) {
1017 case mm_lwxc1_func:
1018 case mm_swxc1_func:
1019 case mm_ldxc1_func:
1020 case mm_sdxc1_func:
1021 goto fpu_emul;
1022 }
1023
1024 goto sigbus;
1025
1026 case mm_ldc132_op:
1027 case mm_sdc132_op:
1028 case mm_lwc132_op:
1029 case mm_swc132_op:
1030 fpu_emul:
1031 /* roll back jump/branch */
1032 regs->cp0_epc = origpc;
1033 regs->regs[31] = orig31;
1034
1035 die_if_kernel("Unaligned FP access in kernel code", regs);
1036 BUG_ON(!used_math());
1037 BUG_ON(!is_fpu_owner());
1038
1039 lose_fpu(1); /* save the FPU state for the emulator */
1040 res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1041 &fault_addr);
1042 own_fpu(1); /* restore FPU state */
1043
1044 /* If something went wrong, signal */
1045 process_fpemu_return(res, fault_addr);
1046
1047 if (res == 0)
1048 goto success;
1049 return;
1050
1051 case mm_lh32_op:
1052 reg = insn.mm_i_format.rt;
1053 goto loadHW;
1054
1055 case mm_lhu32_op:
1056 reg = insn.mm_i_format.rt;
1057 goto loadHWU;
1058
1059 case mm_lw32_op:
1060 reg = insn.mm_i_format.rt;
1061 goto loadW;
1062
1063 case mm_sh32_op:
1064 reg = insn.mm_i_format.rt;
1065 goto storeHW;
1066
1067 case mm_sw32_op:
1068 reg = insn.mm_i_format.rt;
1069 goto storeW;
1070
1071 case mm_ld32_op:
1072 reg = insn.mm_i_format.rt;
1073 goto loadDW;
1074
1075 case mm_sd32_op:
1076 reg = insn.mm_i_format.rt;
1077 goto storeDW;
1078
1079 case mm_pool16c_op:
1080 switch (insn.mm16_m_format.func) {
1081 case mm_lwm16_op:
1082 reg = insn.mm16_m_format.rlist;
1083 rvar = reg + 1;
1084 if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1085 goto sigbus;
1086
1087 for (i = 16; rvar; rvar--, i++) {
1088 LoadW(addr, value, res);
1089 if (res)
1090 goto fault;
1091 addr += 4;
1092 regs->regs[i] = value;
1093 }
1094 LoadW(addr, value, res);
1095 if (res)
1096 goto fault;
1097 regs->regs[31] = value;
1098
1099 goto success;
1100
1101 case mm_swm16_op:
1102 reg = insn.mm16_m_format.rlist;
1103 rvar = reg + 1;
1104 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1105 goto sigbus;
1106
1107 for (i = 16; rvar; rvar--, i++) {
1108 value = regs->regs[i];
1109 StoreW(addr, value, res);
1110 if (res)
1111 goto fault;
1112 addr += 4;
1113 }
1114 value = regs->regs[31];
1115 StoreW(addr, value, res);
1116 if (res)
1117 goto fault;
1118
1119 goto success;
1120
1121 }
1122
1123 goto sigbus;
1124
1125 case mm_lhu16_op:
1126 reg = reg16to32[insn.mm16_rb_format.rt];
1127 goto loadHWU;
1128
1129 case mm_lw16_op:
1130 reg = reg16to32[insn.mm16_rb_format.rt];
1131 goto loadW;
1132
1133 case mm_sh16_op:
1134 reg = reg16to32st[insn.mm16_rb_format.rt];
1135 goto storeHW;
1136
1137 case mm_sw16_op:
1138 reg = reg16to32st[insn.mm16_rb_format.rt];
1139 goto storeW;
1140
1141 case mm_lwsp16_op:
1142 reg = insn.mm16_r5_format.rt;
1143 goto loadW;
1144
1145 case mm_swsp16_op:
1146 reg = insn.mm16_r5_format.rt;
1147 goto storeW;
1148
1149 case mm_lwgp16_op:
1150 reg = reg16to32[insn.mm16_r3_format.rt];
1151 goto loadW;
1152
1153 default:
1154 goto sigill;
1155 }
1156
1157 loadHW:
1158 if (!access_ok(VERIFY_READ, addr, 2))
1159 goto sigbus;
1160
1161 LoadHW(addr, value, res);
1162 if (res)
1163 goto fault;
1164 regs->regs[reg] = value;
1165 goto success;
1166
1167 loadHWU:
1168 if (!access_ok(VERIFY_READ, addr, 2))
1169 goto sigbus;
1170
1171 LoadHWU(addr, value, res);
1172 if (res)
1173 goto fault;
1174 regs->regs[reg] = value;
1175 goto success;
1176
1177 loadW:
1178 if (!access_ok(VERIFY_READ, addr, 4))
1179 goto sigbus;
1180
1181 LoadW(addr, value, res);
1182 if (res)
1183 goto fault;
1184 regs->regs[reg] = value;
1185 goto success;
1186
1187 loadWU:
1188 #ifdef CONFIG_64BIT
1189 /*
1190 * A 32-bit kernel might be running on a 64-bit processor. But
1191 * if we're on a 32-bit processor and an i-cache incoherency
1192 * or race makes us see a 64-bit instruction here the sdl/sdr
1193 * would blow up, so for now we don't handle unaligned 64-bit
1194 * instructions on 32-bit kernels.
1195 */
1196 if (!access_ok(VERIFY_READ, addr, 4))
1197 goto sigbus;
1198
1199 LoadWU(addr, value, res);
1200 if (res)
1201 goto fault;
1202 regs->regs[reg] = value;
1203 goto success;
1204 #endif /* CONFIG_64BIT */
1205
1206 /* Cannot handle 64-bit instructions in 32-bit kernel */
1207 goto sigill;
1208
1209 loadDW:
1210 #ifdef CONFIG_64BIT
1211 /*
1212 * A 32-bit kernel might be running on a 64-bit processor. But
1213 * if we're on a 32-bit processor and an i-cache incoherency
1214 * or race makes us see a 64-bit instruction here the sdl/sdr
1215 * would blow up, so for now we don't handle unaligned 64-bit
1216 * instructions on 32-bit kernels.
1217 */
1218 if (!access_ok(VERIFY_READ, addr, 8))
1219 goto sigbus;
1220
1221 LoadDW(addr, value, res);
1222 if (res)
1223 goto fault;
1224 regs->regs[reg] = value;
1225 goto success;
1226 #endif /* CONFIG_64BIT */
1227
1228 /* Cannot handle 64-bit instructions in 32-bit kernel */
1229 goto sigill;
1230
1231 storeHW:
1232 if (!access_ok(VERIFY_WRITE, addr, 2))
1233 goto sigbus;
1234
1235 value = regs->regs[reg];
1236 StoreHW(addr, value, res);
1237 if (res)
1238 goto fault;
1239 goto success;
1240
1241 storeW:
1242 if (!access_ok(VERIFY_WRITE, addr, 4))
1243 goto sigbus;
1244
1245 value = regs->regs[reg];
1246 StoreW(addr, value, res);
1247 if (res)
1248 goto fault;
1249 goto success;
1250
1251 storeDW:
1252 #ifdef CONFIG_64BIT
1253 /*
1254 * A 32-bit kernel might be running on a 64-bit processor. But
1255 * if we're on a 32-bit processor and an i-cache incoherency
1256 * or race makes us see a 64-bit instruction here the sdl/sdr
1257 * would blow up, so for now we don't handle unaligned 64-bit
1258 * instructions on 32-bit kernels.
1259 */
1260 if (!access_ok(VERIFY_WRITE, addr, 8))
1261 goto sigbus;
1262
1263 value = regs->regs[reg];
1264 StoreDW(addr, value, res);
1265 if (res)
1266 goto fault;
1267 goto success;
1268 #endif /* CONFIG_64BIT */
1269
1270 /* Cannot handle 64-bit instructions in 32-bit kernel */
1271 goto sigill;
1272
1273 success:
1274 regs->cp0_epc = contpc; /* advance or branch */
1275
1276 #ifdef CONFIG_DEBUG_FS
1277 unaligned_instructions++;
1278 #endif
1279 return;
1280
1281 fault:
1282 /* roll back jump/branch */
1283 regs->cp0_epc = origpc;
1284 regs->regs[31] = orig31;
1285 /* Did we have an exception handler installed? */
1286 if (fixup_exception(regs))
1287 return;
1288
1289 die_if_kernel("Unhandled kernel unaligned access", regs);
1290 force_sig(SIGSEGV, current);
1291
1292 return;
1293
1294 sigbus:
1295 die_if_kernel("Unhandled kernel unaligned access", regs);
1296 force_sig(SIGBUS, current);
1297
1298 return;
1299
1300 sigill:
1301 die_if_kernel
1302 ("Unhandled kernel unaligned access or invalid instruction", regs);
1303 force_sig(SIGILL, current);
1304 }
1305
1306 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1307 {
1308 unsigned long value;
1309 unsigned int res;
1310 int reg;
1311 unsigned long orig31;
1312 u16 __user *pc16;
1313 unsigned long origpc;
1314 union mips16e_instruction mips16inst, oldinst;
1315
1316 origpc = regs->cp0_epc;
1317 orig31 = regs->regs[31];
1318 pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1319 /*
1320 * This load never faults.
1321 */
1322 __get_user(mips16inst.full, pc16);
1323 oldinst = mips16inst;
1324
1325 /* skip EXTEND instruction */
1326 if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1327 pc16++;
1328 __get_user(mips16inst.full, pc16);
1329 } else if (delay_slot(regs)) {
1330 /* skip jump instructions */
1331 /* JAL/JALX are 32 bits but have OPCODE in first short int */
1332 if (mips16inst.ri.opcode == MIPS16e_jal_op)
1333 pc16++;
1334 pc16++;
1335 if (get_user(mips16inst.full, pc16))
1336 goto sigbus;
1337 }
1338
1339 switch (mips16inst.ri.opcode) {
1340 case MIPS16e_i64_op: /* I64 or RI64 instruction */
1341 switch (mips16inst.i64.func) { /* I64/RI64 func field check */
1342 case MIPS16e_ldpc_func:
1343 case MIPS16e_ldsp_func:
1344 reg = reg16to32[mips16inst.ri64.ry];
1345 goto loadDW;
1346
1347 case MIPS16e_sdsp_func:
1348 reg = reg16to32[mips16inst.ri64.ry];
1349 goto writeDW;
1350
1351 case MIPS16e_sdrasp_func:
1352 reg = 29; /* GPRSP */
1353 goto writeDW;
1354 }
1355
1356 goto sigbus;
1357
1358 case MIPS16e_swsp_op:
1359 case MIPS16e_lwpc_op:
1360 case MIPS16e_lwsp_op:
1361 reg = reg16to32[mips16inst.ri.rx];
1362 break;
1363
1364 case MIPS16e_i8_op:
1365 if (mips16inst.i8.func != MIPS16e_swrasp_func)
1366 goto sigbus;
1367 reg = 29; /* GPRSP */
1368 break;
1369
1370 default:
1371 reg = reg16to32[mips16inst.rri.ry];
1372 break;
1373 }
1374
1375 switch (mips16inst.ri.opcode) {
1376
1377 case MIPS16e_lb_op:
1378 case MIPS16e_lbu_op:
1379 case MIPS16e_sb_op:
1380 goto sigbus;
1381
1382 case MIPS16e_lh_op:
1383 if (!access_ok(VERIFY_READ, addr, 2))
1384 goto sigbus;
1385
1386 LoadHW(addr, value, res);
1387 if (res)
1388 goto fault;
1389 MIPS16e_compute_return_epc(regs, &oldinst);
1390 regs->regs[reg] = value;
1391 break;
1392
1393 case MIPS16e_lhu_op:
1394 if (!access_ok(VERIFY_READ, addr, 2))
1395 goto sigbus;
1396
1397 LoadHWU(addr, value, res);
1398 if (res)
1399 goto fault;
1400 MIPS16e_compute_return_epc(regs, &oldinst);
1401 regs->regs[reg] = value;
1402 break;
1403
1404 case MIPS16e_lw_op:
1405 case MIPS16e_lwpc_op:
1406 case MIPS16e_lwsp_op:
1407 if (!access_ok(VERIFY_READ, addr, 4))
1408 goto sigbus;
1409
1410 LoadW(addr, value, res);
1411 if (res)
1412 goto fault;
1413 MIPS16e_compute_return_epc(regs, &oldinst);
1414 regs->regs[reg] = value;
1415 break;
1416
1417 case MIPS16e_lwu_op:
1418 #ifdef CONFIG_64BIT
1419 /*
1420 * A 32-bit kernel might be running on a 64-bit processor. But
1421 * if we're on a 32-bit processor and an i-cache incoherency
1422 * or race makes us see a 64-bit instruction here the sdl/sdr
1423 * would blow up, so for now we don't handle unaligned 64-bit
1424 * instructions on 32-bit kernels.
1425 */
1426 if (!access_ok(VERIFY_READ, addr, 4))
1427 goto sigbus;
1428
1429 LoadWU(addr, value, res);
1430 if (res)
1431 goto fault;
1432 MIPS16e_compute_return_epc(regs, &oldinst);
1433 regs->regs[reg] = value;
1434 break;
1435 #endif /* CONFIG_64BIT */
1436
1437 /* Cannot handle 64-bit instructions in 32-bit kernel */
1438 goto sigill;
1439
1440 case MIPS16e_ld_op:
1441 loadDW:
1442 #ifdef CONFIG_64BIT
1443 /*
1444 * A 32-bit kernel might be running on a 64-bit processor. But
1445 * if we're on a 32-bit processor and an i-cache incoherency
1446 * or race makes us see a 64-bit instruction here the sdl/sdr
1447 * would blow up, so for now we don't handle unaligned 64-bit
1448 * instructions on 32-bit kernels.
1449 */
1450 if (!access_ok(VERIFY_READ, addr, 8))
1451 goto sigbus;
1452
1453 LoadDW(addr, value, res);
1454 if (res)
1455 goto fault;
1456 MIPS16e_compute_return_epc(regs, &oldinst);
1457 regs->regs[reg] = value;
1458 break;
1459 #endif /* CONFIG_64BIT */
1460
1461 /* Cannot handle 64-bit instructions in 32-bit kernel */
1462 goto sigill;
1463
1464 case MIPS16e_sh_op:
1465 if (!access_ok(VERIFY_WRITE, addr, 2))
1466 goto sigbus;
1467
1468 MIPS16e_compute_return_epc(regs, &oldinst);
1469 value = regs->regs[reg];
1470 StoreHW(addr, value, res);
1471 if (res)
1472 goto fault;
1473 break;
1474
1475 case MIPS16e_sw_op:
1476 case MIPS16e_swsp_op:
1477 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
1478 if (!access_ok(VERIFY_WRITE, addr, 4))
1479 goto sigbus;
1480
1481 MIPS16e_compute_return_epc(regs, &oldinst);
1482 value = regs->regs[reg];
1483 StoreW(addr, value, res);
1484 if (res)
1485 goto fault;
1486 break;
1487
1488 case MIPS16e_sd_op:
1489 writeDW:
1490 #ifdef CONFIG_64BIT
1491 /*
1492 * A 32-bit kernel might be running on a 64-bit processor. But
1493 * if we're on a 32-bit processor and an i-cache incoherency
1494 * or race makes us see a 64-bit instruction here the sdl/sdr
1495 * would blow up, so for now we don't handle unaligned 64-bit
1496 * instructions on 32-bit kernels.
1497 */
1498 if (!access_ok(VERIFY_WRITE, addr, 8))
1499 goto sigbus;
1500
1501 MIPS16e_compute_return_epc(regs, &oldinst);
1502 value = regs->regs[reg];
1503 StoreDW(addr, value, res);
1504 if (res)
1505 goto fault;
1506 break;
1507 #endif /* CONFIG_64BIT */
1508
1509 /* Cannot handle 64-bit instructions in 32-bit kernel */
1510 goto sigill;
1511
1512 default:
1513 /*
1514 * Pheeee... We encountered an as yet unknown instruction or a
1515 * cache coherence problem. Die, sucker, die ...
1516 */
1517 goto sigill;
1518 }
1519
1520 #ifdef CONFIG_DEBUG_FS
1521 unaligned_instructions++;
1522 #endif
1523
1524 return;
1525
1526 fault:
1527 /* roll back jump/branch */
1528 regs->cp0_epc = origpc;
1529 regs->regs[31] = orig31;
1530 /* Did we have an exception handler installed? */
1531 if (fixup_exception(regs))
1532 return;
1533
1534 die_if_kernel("Unhandled kernel unaligned access", regs);
1535 force_sig(SIGSEGV, current);
1536
1537 return;
1538
1539 sigbus:
1540 die_if_kernel("Unhandled kernel unaligned access", regs);
1541 force_sig(SIGBUS, current);
1542
1543 return;
1544
1545 sigill:
1546 die_if_kernel
1547 ("Unhandled kernel unaligned access or invalid instruction", regs);
1548 force_sig(SIGILL, current);
1549 }
1550 asmlinkage void do_ade(struct pt_regs *regs)
1551 {
1552 unsigned int __user *pc;
1553 mm_segment_t seg;
1554
1555 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1556 1, regs, regs->cp0_badvaddr);
1557 /*
1558 * Did we catch a fault trying to load an instruction?
1559 */
1560 if (regs->cp0_badvaddr == regs->cp0_epc)
1561 goto sigbus;
1562
1563 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1564 goto sigbus;
1565 if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1566 goto sigbus;
1567
1568 /*
1569 * Do branch emulation only if we didn't forward the exception.
1570 * This is all so very ugly ...
1571 */
1572
1573 /*
1574 * Are we running in microMIPS mode?
1575 */
1576 if (get_isa16_mode(regs->cp0_epc)) {
1577 /*
1578 * Did we catch a fault trying to load an instruction in
1579 * 16-bit mode?
1580 */
1581 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1582 goto sigbus;
1583 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1584 show_registers(regs);
1585
1586 if (cpu_has_mmips) {
1587 seg = get_fs();
1588 if (!user_mode(regs))
1589 set_fs(KERNEL_DS);
1590 emulate_load_store_microMIPS(regs,
1591 (void __user *)regs->cp0_badvaddr);
1592 set_fs(seg);
1593
1594 return;
1595 }
1596
1597 if (cpu_has_mips16) {
1598 seg = get_fs();
1599 if (!user_mode(regs))
1600 set_fs(KERNEL_DS);
1601 emulate_load_store_MIPS16e(regs,
1602 (void __user *)regs->cp0_badvaddr);
1603 set_fs(seg);
1604
1605 return;
1606 }
1607
1608 goto sigbus;
1609 }
1610
1611 if (unaligned_action == UNALIGNED_ACTION_SHOW)
1612 show_registers(regs);
1613 pc = (unsigned int __user *)exception_epc(regs);
1614
1615 seg = get_fs();
1616 if (!user_mode(regs))
1617 set_fs(KERNEL_DS);
1618 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1619 set_fs(seg);
1620
1621 return;
1622
1623 sigbus:
1624 die_if_kernel("Kernel unaligned instruction access", regs);
1625 force_sig(SIGBUS, current);
1626
1627 /*
1628 * XXX On return from the signal handler we should advance the epc
1629 */
1630 }
1631
1632 #ifdef CONFIG_DEBUG_FS
1633 extern struct dentry *mips_debugfs_dir;
1634 static int __init debugfs_unaligned(void)
1635 {
1636 struct dentry *d;
1637
1638 if (!mips_debugfs_dir)
1639 return -ENODEV;
1640 d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1641 mips_debugfs_dir, &unaligned_instructions);
1642 if (!d)
1643 return -ENOMEM;
1644 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1645 mips_debugfs_dir, &unaligned_action);
1646 if (!d)
1647 return -ENOMEM;
1648 return 0;
1649 }
1650 __initcall(debugfs_unaligned);
1651 #endif
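/*
 * Runtime usage sketch for the debugfs knobs above, assuming debugfs is
 * mounted at /sys/kernel/debug and mips_debugfs_dir is the "mips"
 * directory there:
 *
 * cat /sys/kernel/debug/mips/unaligned_instructions
 * echo 2 > /sys/kernel/debug/mips/unaligned_action
 *
 * where 0, 1 and 2 select UNALIGNED_ACTION_QUIET, _SIGNAL and _SHOW
 * respectively, matching the enum near the top of this file.
 */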