Merge tag 'v3.10.108' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / powerpc / lib / sstep.c
CommitLineData
14cf11af
PM
1/*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
0d69a052 12#include <linux/kprobes.h>
14cf11af 13#include <linux/ptrace.h>
268bb0ce 14#include <linux/prefetch.h>
14cf11af
PM
15#include <asm/sstep.h>
16#include <asm/processor.h>
0016a4cf
PM
17#include <asm/uaccess.h>
18#include <asm/cputable.h>
14cf11af
PM
19
20extern char system_call_common[];
21
c032524f 22#ifdef CONFIG_PPC64
14cf11af 23/* Bits in SRR1 that are copied from MSR */
af308377 24#define MSR_MASK 0xffffffff87c0ffffUL
c032524f
PM
25#else
26#define MSR_MASK 0x87c0ffff
27#endif
14cf11af 28
0016a4cf
PM
29/* Bits in XER */
30#define XER_SO 0x80000000U
31#define XER_OV 0x40000000U
32#define XER_CA 0x20000000U
33
cd64d169 34#ifdef CONFIG_PPC_FPU
0016a4cf
PM
35/*
36 * Functions in ldstfp.S
37 */
38extern int do_lfs(int rn, unsigned long ea);
39extern int do_lfd(int rn, unsigned long ea);
40extern int do_stfs(int rn, unsigned long ea);
41extern int do_stfd(int rn, unsigned long ea);
42extern int do_lvx(int rn, unsigned long ea);
43extern int do_stvx(int rn, unsigned long ea);
44extern int do_lxvd2x(int rn, unsigned long ea);
45extern int do_stxvd2x(int rn, unsigned long ea);
cd64d169 46#endif
0016a4cf 47
b91e136c
ME
48/*
49 * Emulate the truncation of 64 bit values in 32-bit mode.
50 */
/*
 * Mimic 32-bit execution: when the MSR says the CPU is not in
 * 64-bit mode, only the low 32 bits of @val are kept.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	return (msr & MSR_64BIT) ? val : (val & 0xffffffffUL);
#else
	return val;
#endif
}
59
14cf11af
PM
60/*
61 * Determine whether a conditional branch instruction would branch.
62 */
0d69a052 63static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
14cf11af
PM
64{
65 unsigned int bo = (instr >> 21) & 0x1f;
66 unsigned int bi;
67
68 if ((bo & 4) == 0) {
69 /* decrement counter */
70 --regs->ctr;
71 if (((bo >> 1) & 1) ^ (regs->ctr == 0))
72 return 0;
73 }
74 if ((bo & 0x10) == 0) {
75 /* check bit from CR */
76 bi = (instr >> 16) & 0x1f;
77 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
78 return 0;
79 }
80 return 1;
81}
82
0016a4cf
PM
83
84static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
85{
86 if (!user_mode(regs))
87 return 1;
88 return __access_ok(ea, nb, USER_DS);
89}
90
91/*
92 * Calculate effective address for a D-form instruction
93 */
94static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
95{
96 int ra;
97 unsigned long ea;
98
99 ra = (instr >> 16) & 0x1f;
100 ea = (signed short) instr; /* sign-extend */
101 if (ra) {
102 ea += regs->gpr[ra];
103 if (instr & 0x04000000) /* update forms */
104 regs->gpr[ra] = ea;
105 }
b91e136c
ME
106
107 return truncate_if_32bit(regs->msr, ea);
0016a4cf
PM
108}
109
110#ifdef __powerpc64__
111/*
112 * Calculate effective address for a DS-form instruction
113 */
114static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
115{
116 int ra;
117 unsigned long ea;
118
119 ra = (instr >> 16) & 0x1f;
120 ea = (signed short) (instr & ~3); /* sign-extend */
121 if (ra) {
122 ea += regs->gpr[ra];
123 if ((instr & 3) == 1) /* update forms */
124 regs->gpr[ra] = ea;
125 }
b91e136c
ME
126
127 return truncate_if_32bit(regs->msr, ea);
0016a4cf
PM
128}
129#endif /* __powerpc64 */
130
131/*
132 * Calculate effective address for an X-form instruction
133 */
134static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
135 int do_update)
136{
137 int ra, rb;
138 unsigned long ea;
139
140 ra = (instr >> 16) & 0x1f;
141 rb = (instr >> 11) & 0x1f;
142 ea = regs->gpr[rb];
143 if (ra) {
144 ea += regs->gpr[ra];
145 if (do_update) /* update forms */
146 regs->gpr[ra] = ea;
147 }
b91e136c
ME
148
149 return truncate_if_32bit(regs->msr, ea);
0016a4cf
PM
150}
151
152/*
153 * Return the largest power of 2, not greater than sizeof(unsigned long),
154 * such that x is a multiple of it.
155 */
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	unsigned long bits = x | sizeof(unsigned long);

	/* two's-complement trick: n & -n isolates the lowest set bit */
	return bits & -bits;
}
161
162
/* Swap the two low-order bytes of @x (16-bit byte reversal). */
static inline unsigned long byterev_2(unsigned long x)
{
	unsigned long lo = x & 0xff;
	unsigned long hi = (x >> 8) & 0xff;

	return (lo << 8) | hi;
}
167
/* Reverse the order of the four low-order bytes of @x. */
static inline unsigned long byterev_4(unsigned long x)
{
	unsigned long b0 = (x >> 24) & 0xff;
	unsigned long b1 = (x >> 16) & 0xff;
	unsigned long b2 = (x >> 8) & 0xff;
	unsigned long b3 = x & 0xff;

	return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
}
173
174#ifdef __powerpc64__
/* Reverse all eight bytes of @x: reverse each half, then swap halves. */
static inline unsigned long byterev_8(unsigned long x)
{
	unsigned long new_hi = byterev_4(x);		/* old low word */
	unsigned long new_lo = byterev_4(x >> 32);	/* old high word */

	return (new_hi << 32) | new_lo;
}
179#endif
180
181static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
182 int nb)
183{
184 int err = 0;
185 unsigned long x = 0;
186
187 switch (nb) {
188 case 1:
189 err = __get_user(x, (unsigned char __user *) ea);
190 break;
191 case 2:
192 err = __get_user(x, (unsigned short __user *) ea);
193 break;
194 case 4:
195 err = __get_user(x, (unsigned int __user *) ea);
196 break;
197#ifdef __powerpc64__
198 case 8:
199 err = __get_user(x, (unsigned long __user *) ea);
200 break;
201#endif
202 }
203 if (!err)
204 *dest = x;
205 return err;
206}
207
/*
 * Read @nb bytes starting at the unaligned address @ea.  The data is
 * assembled big-endian style: each naturally-aligned sub-access is
 * shifted in below what has been read so far, so the lowest-addressed
 * bytes end up most significant in *@dest.  Returns 0 on success or
 * the error from the underlying aligned read.
 * (@regs is unused here; kept for symmetry with the other accessors.)
 */
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
				int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		/* largest aligned chunk readable at ea ... */
		c = max_align(ea);
		/* ... capped to the number of bytes still wanted */
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		/* append the new chunk below the accumulated value */
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}
229
230/*
231 * Read memory at address ea for nb bytes, return 0 for success
232 * or -EFAULT if an error occurred.
233 */
234static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
235 struct pt_regs *regs)
236{
237 if (!address_ok(regs, ea, nb))
238 return -EFAULT;
239 if ((ea & (nb - 1)) == 0)
240 return read_mem_aligned(dest, ea, nb);
241 return read_mem_unaligned(dest, ea, nb, regs);
242}
243
244static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
245 int nb)
246{
247 int err = 0;
248
249 switch (nb) {
250 case 1:
251 err = __put_user(val, (unsigned char __user *) ea);
252 break;
253 case 2:
254 err = __put_user(val, (unsigned short __user *) ea);
255 break;
256 case 4:
257 err = __put_user(val, (unsigned int __user *) ea);
258 break;
259#ifdef __powerpc64__
260 case 8:
261 err = __put_user(val, (unsigned long __user *) ea);
262 break;
263#endif
264 }
265 return err;
266}
267
268static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
269 int nb, struct pt_regs *regs)
270{
271 int err;
272 unsigned long c;
273
274 /* unaligned or little-endian, do this in pieces */
275 for (; nb > 0; nb -= c) {
276 c = max_align(ea);
277 if (c > nb)
278 c = max_align(nb);
279 err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
280 if (err)
281 return err;
282 ++ea;
283 }
284 return 0;
285}
286
287/*
288 * Write memory at address ea for nb bytes, return 0 for success
289 * or -EFAULT if an error occurred.
290 */
291static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
292 struct pt_regs *regs)
293{
294 if (!address_ok(regs, ea, nb))
295 return -EFAULT;
296 if ((ea & (nb - 1)) == 0)
297 return write_mem_aligned(val, ea, nb);
298 return write_mem_unaligned(val, ea, nb, regs);
299}
300
cd64d169 301#ifdef CONFIG_PPC_FPU
14cf11af 302/*
0016a4cf
PM
303 * Check the address and alignment, and call func to do the actual
304 * load or store.
305 */
/*
 * Emulate a floating-point load of @nb bytes into FP register @rn.
 * @func is the asm helper (do_lfs/do_lfd) that moves the data into
 * the register.  A word-aligned ea is passed to the helper directly;
 * otherwise the bytes are first gathered into an aligned buffer and
 * the helper reads from that buffer instead.
 * Returns 0 on success, -EFAULT or the underlying read error.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		/* word-aligned: the helper can access memory itself */
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		/* value fits in one long */
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		/*
		 * read_mem_unaligned left the value in the low-order
		 * bytes of val[0]; step ptr forward so the helper sees
		 * the nb data bytes at the tail (big-endian layout).
		 */
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
332
/*
 * Emulate a floating-point store of @nb bytes from FP register @rn.
 * @func is the asm helper (do_stfs/do_stfd).  A word-aligned ea is
 * passed to the helper directly; otherwise the helper first dumps the
 * register into an aligned buffer, whose bytes are then written out
 * piecewise.
 * Returns 0 on success, -EFAULT or the underlying write error.
 */
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		/* word-aligned: the helper can access memory itself */
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		/*
		 * Point the helper at the tail of val[0] so the nb
		 * bytes it stores end up in the low-order positions
		 * that write_mem_unaligned emits (big-endian layout).
		 */
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
cd64d169 363#endif
0016a4cf
PM
364
365#ifdef CONFIG_ALTIVEC
366/* For Altivec/VMX, no need to worry about alignment */
367static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
368 unsigned long ea, struct pt_regs *regs)
369{
370 if (!address_ok(regs, ea & ~0xfUL, 16))
371 return -EFAULT;
372 return (*func)(rn, ea);
373}
374
375static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
376 unsigned long ea, struct pt_regs *regs)
377{
378 if (!address_ok(regs, ea & ~0xfUL, 16))
379 return -EFAULT;
380 return (*func)(rn, ea);
381}
382#endif /* CONFIG_ALTIVEC */
383
384#ifdef CONFIG_VSX
385static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
386 unsigned long ea, struct pt_regs *regs)
387{
388 int err;
389 unsigned long val[2];
390
391 if (!address_ok(regs, ea, 16))
392 return -EFAULT;
393 if ((ea & 3) == 0)
394 return (*func)(rn, ea);
395 err = read_mem_unaligned(&val[0], ea, 8, regs);
396 if (!err)
397 err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
398 if (!err)
399 err = (*func)(rn, (unsigned long) &val[0]);
400 return err;
401}
402
403static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
404 unsigned long ea, struct pt_regs *regs)
405{
406 int err;
407 unsigned long val[2];
408
409 if (!address_ok(regs, ea, 16))
410 return -EFAULT;
411 if ((ea & 3) == 0)
412 return (*func)(rn, ea);
413 err = (*func)(rn, (unsigned long) &val[0]);
414 if (err)
415 return err;
416 err = write_mem_unaligned(val[0], ea, 8, regs);
417 if (!err)
418 err = write_mem_unaligned(val[1], ea + 8, 8, regs);
419 return err;
420}
421#endif /* CONFIG_VSX */
422
423#define __put_user_asmx(x, addr, err, op, cr) \
424 __asm__ __volatile__( \
425 "1: " op " %2,0,%3\n" \
426 " mfcr %1\n" \
427 "2:\n" \
428 ".section .fixup,\"ax\"\n" \
429 "3: li %0,%4\n" \
430 " b 2b\n" \
431 ".previous\n" \
432 ".section __ex_table,\"a\"\n" \
433 PPC_LONG_ALIGN "\n" \
434 PPC_LONG "1b,3b\n" \
435 ".previous" \
436 : "=r" (err), "=r" (cr) \
437 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
438
439#define __get_user_asmx(x, addr, err, op) \
440 __asm__ __volatile__( \
441 "1: "op" %1,0,%2\n" \
442 "2:\n" \
443 ".section .fixup,\"ax\"\n" \
444 "3: li %0,%3\n" \
445 " b 2b\n" \
446 ".previous\n" \
447 ".section __ex_table,\"a\"\n" \
448 PPC_LONG_ALIGN "\n" \
449 PPC_LONG "1b,3b\n" \
450 ".previous" \
451 : "=r" (err), "=r" (x) \
452 : "r" (addr), "i" (-EFAULT), "0" (err))
453
454#define __cacheop_user_asmx(addr, err, op) \
455 __asm__ __volatile__( \
456 "1: "op" 0,%1\n" \
457 "2:\n" \
458 ".section .fixup,\"ax\"\n" \
459 "3: li %0,%3\n" \
460 " b 2b\n" \
461 ".previous\n" \
462 ".section __ex_table,\"a\"\n" \
463 PPC_LONG_ALIGN "\n" \
464 PPC_LONG "1b,3b\n" \
465 ".previous" \
466 : "=r" (err) \
467 : "r" (addr), "i" (-EFAULT), "0" (err))
468
469static void __kprobes set_cr0(struct pt_regs *regs, int rd)
470{
471 long val = regs->gpr[rd];
472
473 regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
474#ifdef __powerpc64__
b91e136c 475 if (!(regs->msr & MSR_64BIT))
0016a4cf
PM
476 val = (int) val;
477#endif
478 if (val < 0)
479 regs->ccr |= 0x80000000;
480 else if (val > 0)
481 regs->ccr |= 0x40000000;
482 else
483 regs->ccr |= 0x20000000;
484}
485
/*
 * Emulate an add-with-carry style instruction: gpr[rd] = val1 + val2
 * + carry_in, setting or clearing XER[CA] according to whether the
 * addition carried out of the (32- or 64-bit) result width.
 */
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	/* in 32-bit mode, carry is detected on the low 32 bits only */
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	/*
	 * Unsigned overflow test: a carry out occurred iff the truncated
	 * sum is less than one addend, or equal to it when a carry was
	 * added in.
	 */
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
506
507static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
508 int crfld)
509{
510 unsigned int crval, shift;
511
512 crval = (regs->xer >> 31) & 1; /* get SO bit */
513 if (v1 < v2)
514 crval |= 8;
515 else if (v1 > v2)
516 crval |= 4;
517 else
518 crval |= 2;
519 shift = (7 - crfld) * 4;
520 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
521}
522
523static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
524 unsigned long v2, int crfld)
525{
526 unsigned int crval, shift;
527
528 crval = (regs->xer >> 31) & 1; /* get SO bit */
529 if (v1 < v2)
530 crval |= 8;
531 else if (v1 > v2)
532 crval |= 4;
533 else
534 crval |= 2;
535 shift = (7 - crfld) * 4;
536 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
537}
538
539/*
540 * Elements of 32-bit rotate and mask instructions.
541 */
542#define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
543 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
544#ifdef __powerpc64__
545#define MASK64_L(mb) (~0UL >> (mb))
546#define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
547#define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
548#define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
549#else
550#define DATA32(x) (x)
551#endif
552#define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
553
554/*
555 * Emulate instructions that cause a transfer of control,
556 * loads and stores, and a few other instructions.
14cf11af
PM
557 * Returns 1 if the step was emulated, 0 if not,
558 * or -1 if the instruction is one that should not be stepped,
559 * such as an rfid, or a mtmsrd that would clear MSR_RI.
560 */
0d69a052 561int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
14cf11af 562{
0016a4cf 563 unsigned int opcode, ra, rb, rd, spr, u;
14cf11af 564 unsigned long int imm;
0016a4cf
PM
565 unsigned long int val, val2;
566 unsigned long int ea;
567 unsigned int cr, mb, me, sh;
568 int err;
8e9f6937 569 unsigned long old_ra, val3;
0016a4cf 570 long ival;
14cf11af
PM
571
572 opcode = instr >> 26;
573 switch (opcode) {
574 case 16: /* bc */
575 imm = (signed short)(instr & 0xfffc);
576 if ((instr & 2) == 0)
577 imm += regs->nip;
578 regs->nip += 4;
b91e136c 579 regs->nip = truncate_if_32bit(regs->msr, regs->nip);
14cf11af
PM
580 if (instr & 1)
581 regs->link = regs->nip;
582 if (branch_taken(instr, regs))
583 regs->nip = imm;
584 return 1;
c032524f 585#ifdef CONFIG_PPC64
14cf11af
PM
586 case 17: /* sc */
587 /*
588 * N.B. this uses knowledge about how the syscall
589 * entry code works. If that is changed, this will
590 * need to be changed also.
591 */
0016a4cf
PM
592 if (regs->gpr[0] == 0x1ebe &&
593 cpu_has_feature(CPU_FTR_REAL_LE)) {
594 regs->msr ^= MSR_LE;
595 goto instr_done;
596 }
14cf11af 597 regs->gpr[9] = regs->gpr[13];
0016a4cf 598 regs->gpr[10] = MSR_KERNEL;
14cf11af
PM
599 regs->gpr[11] = regs->nip + 4;
600 regs->gpr[12] = regs->msr & MSR_MASK;
601 regs->gpr[13] = (unsigned long) get_paca();
602 regs->nip = (unsigned long) &system_call_common;
603 regs->msr = MSR_KERNEL;
604 return 1;
c032524f 605#endif
14cf11af
PM
606 case 18: /* b */
607 imm = instr & 0x03fffffc;
608 if (imm & 0x02000000)
609 imm -= 0x04000000;
610 if ((instr & 2) == 0)
611 imm += regs->nip;
b91e136c
ME
612 if (instr & 1)
613 regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
614 imm = truncate_if_32bit(regs->msr, imm);
14cf11af
PM
615 regs->nip = imm;
616 return 1;
617 case 19:
0016a4cf
PM
618 switch ((instr >> 1) & 0x3ff) {
619 case 16: /* bclr */
620 case 528: /* bcctr */
14cf11af 621 imm = (instr & 0x400)? regs->ctr: regs->link;
b91e136c
ME
622 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
623 imm = truncate_if_32bit(regs->msr, imm);
14cf11af
PM
624 if (instr & 1)
625 regs->link = regs->nip;
626 if (branch_taken(instr, regs))
627 regs->nip = imm;
628 return 1;
0016a4cf
PM
629
630 case 18: /* rfid, scary */
14cf11af 631 return -1;
0016a4cf
PM
632
633 case 150: /* isync */
634 isync();
635 goto instr_done;
636
637 case 33: /* crnor */
638 case 129: /* crandc */
639 case 193: /* crxor */
640 case 225: /* crnand */
641 case 257: /* crand */
642 case 289: /* creqv */
643 case 417: /* crorc */
644 case 449: /* cror */
645 ra = (instr >> 16) & 0x1f;
646 rb = (instr >> 11) & 0x1f;
647 rd = (instr >> 21) & 0x1f;
648 ra = (regs->ccr >> (31 - ra)) & 1;
649 rb = (regs->ccr >> (31 - rb)) & 1;
650 val = (instr >> (6 + ra * 2 + rb)) & 1;
651 regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
652 (val << (31 - rd));
653 goto instr_done;
654 }
655 break;
656 case 31:
657 switch ((instr >> 1) & 0x3ff) {
658 case 598: /* sync */
659#ifdef __powerpc64__
660 switch ((instr >> 21) & 3) {
661 case 1: /* lwsync */
662 asm volatile("lwsync" : : : "memory");
663 goto instr_done;
664 case 2: /* ptesync */
665 asm volatile("ptesync" : : : "memory");
666 goto instr_done;
667 }
668#endif
669 mb();
670 goto instr_done;
671
672 case 854: /* eieio */
673 eieio();
674 goto instr_done;
675 }
676 break;
677 }
678
679 /* Following cases refer to regs->gpr[], so we need all regs */
680 if (!FULL_REGS(regs))
681 return 0;
682
683 rd = (instr >> 21) & 0x1f;
684 ra = (instr >> 16) & 0x1f;
685 rb = (instr >> 11) & 0x1f;
686
687 switch (opcode) {
688 case 7: /* mulli */
689 regs->gpr[rd] = regs->gpr[ra] * (short) instr;
690 goto instr_done;
691
692 case 8: /* subfic */
693 imm = (short) instr;
694 add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
695 goto instr_done;
696
697 case 10: /* cmpli */
698 imm = (unsigned short) instr;
699 val = regs->gpr[ra];
700#ifdef __powerpc64__
701 if ((rd & 1) == 0)
702 val = (unsigned int) val;
703#endif
704 do_cmp_unsigned(regs, val, imm, rd >> 2);
705 goto instr_done;
706
707 case 11: /* cmpi */
708 imm = (short) instr;
709 val = regs->gpr[ra];
710#ifdef __powerpc64__
711 if ((rd & 1) == 0)
712 val = (int) val;
713#endif
714 do_cmp_signed(regs, val, imm, rd >> 2);
715 goto instr_done;
716
717 case 12: /* addic */
718 imm = (short) instr;
719 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
720 goto instr_done;
721
722 case 13: /* addic. */
723 imm = (short) instr;
724 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
725 set_cr0(regs, rd);
726 goto instr_done;
727
728 case 14: /* addi */
729 imm = (short) instr;
730 if (ra)
731 imm += regs->gpr[ra];
732 regs->gpr[rd] = imm;
733 goto instr_done;
734
735 case 15: /* addis */
736 imm = ((short) instr) << 16;
737 if (ra)
738 imm += regs->gpr[ra];
739 regs->gpr[rd] = imm;
740 goto instr_done;
741
742 case 20: /* rlwimi */
743 mb = (instr >> 6) & 0x1f;
744 me = (instr >> 1) & 0x1f;
745 val = DATA32(regs->gpr[rd]);
746 imm = MASK32(mb, me);
747 regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
748 goto logical_done;
749
750 case 21: /* rlwinm */
751 mb = (instr >> 6) & 0x1f;
752 me = (instr >> 1) & 0x1f;
753 val = DATA32(regs->gpr[rd]);
754 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
755 goto logical_done;
756
757 case 23: /* rlwnm */
758 mb = (instr >> 6) & 0x1f;
759 me = (instr >> 1) & 0x1f;
760 rb = regs->gpr[rb] & 0x1f;
761 val = DATA32(regs->gpr[rd]);
762 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
763 goto logical_done;
764
765 case 24: /* ori */
766 imm = (unsigned short) instr;
767 regs->gpr[ra] = regs->gpr[rd] | imm;
768 goto instr_done;
769
770 case 25: /* oris */
771 imm = (unsigned short) instr;
772 regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
773 goto instr_done;
774
775 case 26: /* xori */
776 imm = (unsigned short) instr;
777 regs->gpr[ra] = regs->gpr[rd] ^ imm;
778 goto instr_done;
779
780 case 27: /* xoris */
781 imm = (unsigned short) instr;
782 regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
783 goto instr_done;
784
785 case 28: /* andi. */
786 imm = (unsigned short) instr;
787 regs->gpr[ra] = regs->gpr[rd] & imm;
788 set_cr0(regs, ra);
789 goto instr_done;
790
791 case 29: /* andis. */
792 imm = (unsigned short) instr;
793 regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
794 set_cr0(regs, ra);
795 goto instr_done;
796
797#ifdef __powerpc64__
798 case 30: /* rld* */
799 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
800 val = regs->gpr[rd];
801 if ((instr & 0x10) == 0) {
802 sh = rb | ((instr & 2) << 4);
803 val = ROTATE(val, sh);
804 switch ((instr >> 2) & 3) {
805 case 0: /* rldicl */
806 regs->gpr[ra] = val & MASK64_L(mb);
807 goto logical_done;
808 case 1: /* rldicr */
809 regs->gpr[ra] = val & MASK64_R(mb);
810 goto logical_done;
811 case 2: /* rldic */
812 regs->gpr[ra] = val & MASK64(mb, 63 - sh);
813 goto logical_done;
814 case 3: /* rldimi */
815 imm = MASK64(mb, 63 - sh);
816 regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
817 (val & imm);
818 goto logical_done;
819 }
820 } else {
821 sh = regs->gpr[rb] & 0x3f;
822 val = ROTATE(val, sh);
823 switch ((instr >> 1) & 7) {
824 case 0: /* rldcl */
825 regs->gpr[ra] = val & MASK64_L(mb);
826 goto logical_done;
827 case 1: /* rldcr */
828 regs->gpr[ra] = val & MASK64_R(mb);
829 goto logical_done;
830 }
14cf11af 831 }
0016a4cf
PM
832#endif
833
14cf11af 834 case 31:
0016a4cf
PM
835 switch ((instr >> 1) & 0x3ff) {
836 case 83: /* mfmsr */
837 if (regs->msr & MSR_PR)
838 break;
14cf11af 839 regs->gpr[rd] = regs->msr & MSR_MASK;
0016a4cf
PM
840 goto instr_done;
841 case 146: /* mtmsr */
842 if (regs->msr & MSR_PR)
843 break;
c032524f
PM
844 imm = regs->gpr[rd];
845 if ((imm & MSR_RI) == 0)
846 /* can't step mtmsr that would clear MSR_RI */
847 return -1;
848 regs->msr = imm;
0016a4cf 849 goto instr_done;
c032524f 850#ifdef CONFIG_PPC64
0016a4cf 851 case 178: /* mtmsrd */
14cf11af
PM
852 /* only MSR_EE and MSR_RI get changed if bit 15 set */
853 /* mtmsrd doesn't change MSR_HV and MSR_ME */
0016a4cf
PM
854 if (regs->msr & MSR_PR)
855 break;
14cf11af
PM
856 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
857 imm = (regs->msr & MSR_MASK & ~imm)
858 | (regs->gpr[rd] & imm);
859 if ((imm & MSR_RI) == 0)
860 /* can't step mtmsrd that would clear MSR_RI */
861 return -1;
862 regs->msr = imm;
0016a4cf 863 goto instr_done;
c032524f 864#endif
0016a4cf 865 case 19: /* mfcr */
733c88d4
AB
866 if ((instr >> 20) & 1) {
867 imm = 0xf0000000UL;
868 for (sh = 0; sh < 8; ++sh) {
869 if (instr & (0x80000 >> sh)) {
870 regs->gpr[rd] = regs->ccr & imm;
871 break;
872 }
873 imm >>= 4;
874 }
875
876 goto instr_done;
877 }
878
6888199f
AM
879 regs->gpr[rd] = regs->ccr;
880 regs->gpr[rd] &= 0xffffffffUL;
0016a4cf
PM
881 goto instr_done;
882
883 case 144: /* mtcrf */
884 imm = 0xf0000000UL;
885 val = regs->gpr[rd];
886 for (sh = 0; sh < 8; ++sh) {
887 if (instr & (0x80000 >> sh))
888 regs->ccr = (regs->ccr & ~imm) |
889 (val & imm);
890 imm >>= 4;
891 }
892 goto instr_done;
893
894 case 339: /* mfspr */
6888199f
AM
895 spr = (instr >> 11) & 0x3ff;
896 switch (spr) {
897 case 0x20: /* mfxer */
898 regs->gpr[rd] = regs->xer;
899 regs->gpr[rd] &= 0xffffffffUL;
0016a4cf 900 goto instr_done;
6888199f
AM
901 case 0x100: /* mflr */
902 regs->gpr[rd] = regs->link;
0016a4cf 903 goto instr_done;
6888199f
AM
904 case 0x120: /* mfctr */
905 regs->gpr[rd] = regs->ctr;
0016a4cf 906 goto instr_done;
6888199f
AM
907 }
908 break;
0016a4cf
PM
909
910 case 467: /* mtspr */
6888199f
AM
911 spr = (instr >> 11) & 0x3ff;
912 switch (spr) {
913 case 0x20: /* mtxer */
914 regs->xer = (regs->gpr[rd] & 0xffffffffUL);
0016a4cf 915 goto instr_done;
6888199f
AM
916 case 0x100: /* mtlr */
917 regs->link = regs->gpr[rd];
0016a4cf 918 goto instr_done;
6888199f
AM
919 case 0x120: /* mtctr */
920 regs->ctr = regs->gpr[rd];
0016a4cf 921 goto instr_done;
6888199f 922 }
0016a4cf
PM
923 break;
924
925/*
926 * Compare instructions
927 */
928 case 0: /* cmp */
929 val = regs->gpr[ra];
930 val2 = regs->gpr[rb];
931#ifdef __powerpc64__
932 if ((rd & 1) == 0) {
933 /* word (32-bit) compare */
934 val = (int) val;
935 val2 = (int) val2;
936 }
937#endif
938 do_cmp_signed(regs, val, val2, rd >> 2);
939 goto instr_done;
940
941 case 32: /* cmpl */
942 val = regs->gpr[ra];
943 val2 = regs->gpr[rb];
944#ifdef __powerpc64__
945 if ((rd & 1) == 0) {
946 /* word (32-bit) compare */
947 val = (unsigned int) val;
948 val2 = (unsigned int) val2;
949 }
950#endif
951 do_cmp_unsigned(regs, val, val2, rd >> 2);
952 goto instr_done;
953
954/*
955 * Arithmetic instructions
956 */
957 case 8: /* subfc */
958 add_with_carry(regs, rd, ~regs->gpr[ra],
959 regs->gpr[rb], 1);
960 goto arith_done;
961#ifdef __powerpc64__
962 case 9: /* mulhdu */
963 asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
964 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
965 goto arith_done;
966#endif
967 case 10: /* addc */
968 add_with_carry(regs, rd, regs->gpr[ra],
969 regs->gpr[rb], 0);
970 goto arith_done;
971
972 case 11: /* mulhwu */
973 asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
974 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
975 goto arith_done;
976
977 case 40: /* subf */
978 regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
979 goto arith_done;
980#ifdef __powerpc64__
981 case 73: /* mulhd */
982 asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
983 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
984 goto arith_done;
985#endif
986 case 75: /* mulhw */
987 asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
988 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
989 goto arith_done;
990
991 case 104: /* neg */
992 regs->gpr[rd] = -regs->gpr[ra];
993 goto arith_done;
994
995 case 136: /* subfe */
996 add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
997 regs->xer & XER_CA);
998 goto arith_done;
999
1000 case 138: /* adde */
1001 add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
1002 regs->xer & XER_CA);
1003 goto arith_done;
1004
1005 case 200: /* subfze */
1006 add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
1007 regs->xer & XER_CA);
1008 goto arith_done;
1009
1010 case 202: /* addze */
1011 add_with_carry(regs, rd, regs->gpr[ra], 0L,
1012 regs->xer & XER_CA);
1013 goto arith_done;
1014
1015 case 232: /* subfme */
1016 add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
1017 regs->xer & XER_CA);
1018 goto arith_done;
1019#ifdef __powerpc64__
1020 case 233: /* mulld */
1021 regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
1022 goto arith_done;
1023#endif
1024 case 234: /* addme */
1025 add_with_carry(regs, rd, regs->gpr[ra], -1L,
1026 regs->xer & XER_CA);
1027 goto arith_done;
1028
1029 case 235: /* mullw */
1030 regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
1031 (unsigned int) regs->gpr[rb];
1032 goto arith_done;
1033
1034 case 266: /* add */
1035 regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
1036 goto arith_done;
1037#ifdef __powerpc64__
1038 case 457: /* divdu */
1039 regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
1040 goto arith_done;
1041#endif
1042 case 459: /* divwu */
1043 regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
1044 (unsigned int) regs->gpr[rb];
1045 goto arith_done;
1046#ifdef __powerpc64__
1047 case 489: /* divd */
1048 regs->gpr[rd] = (long int) regs->gpr[ra] /
1049 (long int) regs->gpr[rb];
1050 goto arith_done;
1051#endif
1052 case 491: /* divw */
1053 regs->gpr[rd] = (int) regs->gpr[ra] /
1054 (int) regs->gpr[rb];
1055 goto arith_done;
1056
1057
1058/*
1059 * Logical instructions
1060 */
1061 case 26: /* cntlzw */
1062 asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
1063 "r" (regs->gpr[rd]));
1064 goto logical_done;
1065#ifdef __powerpc64__
1066 case 58: /* cntlzd */
1067 asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
1068 "r" (regs->gpr[rd]));
1069 goto logical_done;
1070#endif
1071 case 28: /* and */
1072 regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
1073 goto logical_done;
1074
1075 case 60: /* andc */
1076 regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
1077 goto logical_done;
1078
1079 case 124: /* nor */
1080 regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
1081 goto logical_done;
1082
1083 case 284: /* xor */
1084 regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1085 goto logical_done;
1086
1087 case 316: /* xor */
1088 regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
1089 goto logical_done;
1090
1091 case 412: /* orc */
1092 regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
1093 goto logical_done;
1094
1095 case 444: /* or */
1096 regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
1097 goto logical_done;
1098
1099 case 476: /* nand */
1100 regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
1101 goto logical_done;
1102
1103 case 922: /* extsh */
1104 regs->gpr[ra] = (signed short) regs->gpr[rd];
1105 goto logical_done;
1106
1107 case 954: /* extsb */
1108 regs->gpr[ra] = (signed char) regs->gpr[rd];
1109 goto logical_done;
1110#ifdef __powerpc64__
1111 case 986: /* extsw */
1112 regs->gpr[ra] = (signed int) regs->gpr[rd];
1113 goto logical_done;
1114#endif
1115
1116/*
1117 * Shift instructions
1118 */
1119 case 24: /* slw */
1120 sh = regs->gpr[rb] & 0x3f;
1121 if (sh < 32)
1122 regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
1123 else
1124 regs->gpr[ra] = 0;
1125 goto logical_done;
1126
1127 case 536: /* srw */
1128 sh = regs->gpr[rb] & 0x3f;
1129 if (sh < 32)
1130 regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1131 else
1132 regs->gpr[ra] = 0;
1133 goto logical_done;
1134
1135 case 792: /* sraw */
1136 sh = regs->gpr[rb] & 0x3f;
1137 ival = (signed int) regs->gpr[rd];
1138 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1139 if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
1140 regs->xer |= XER_CA;
1141 else
1142 regs->xer &= ~XER_CA;
1143 goto logical_done;
1144
1145 case 824: /* srawi */
1146 sh = rb;
1147 ival = (signed int) regs->gpr[rd];
1148 regs->gpr[ra] = ival >> sh;
1149 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
1150 regs->xer |= XER_CA;
1151 else
1152 regs->xer &= ~XER_CA;
1153 goto logical_done;
1154
1155#ifdef __powerpc64__
1156 case 27: /* sld */
1157 sh = regs->gpr[rd] & 0x7f;
1158 if (sh < 64)
1159 regs->gpr[ra] = regs->gpr[rd] << sh;
1160 else
1161 regs->gpr[ra] = 0;
1162 goto logical_done;
1163
1164 case 539: /* srd */
1165 sh = regs->gpr[rb] & 0x7f;
1166 if (sh < 64)
1167 regs->gpr[ra] = regs->gpr[rd] >> sh;
1168 else
1169 regs->gpr[ra] = 0;
1170 goto logical_done;
1171
1172 case 794: /* srad */
1173 sh = regs->gpr[rb] & 0x7f;
1174 ival = (signed long int) regs->gpr[rd];
1175 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1176 if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
1177 regs->xer |= XER_CA;
1178 else
1179 regs->xer &= ~XER_CA;
1180 goto logical_done;
1181
1182 case 826: /* sradi with sh_5 = 0 */
1183 case 827: /* sradi with sh_5 = 1 */
1184 sh = rb | ((instr & 2) << 4);
1185 ival = (signed long int) regs->gpr[rd];
1186 regs->gpr[ra] = ival >> sh;
1187 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
1188 regs->xer |= XER_CA;
1189 else
1190 regs->xer &= ~XER_CA;
1191 goto logical_done;
1192#endif /* __powerpc64__ */
1193
1194/*
1195 * Cache instructions
1196 */
1197 case 54: /* dcbst */
1198 ea = xform_ea(instr, regs, 0);
1199 if (!address_ok(regs, ea, 8))
1200 return 0;
1201 err = 0;
1202 __cacheop_user_asmx(ea, err, "dcbst");
1203 if (err)
1204 return 0;
1205 goto instr_done;
1206
1207 case 86: /* dcbf */
1208 ea = xform_ea(instr, regs, 0);
1209 if (!address_ok(regs, ea, 8))
1210 return 0;
1211 err = 0;
1212 __cacheop_user_asmx(ea, err, "dcbf");
1213 if (err)
1214 return 0;
1215 goto instr_done;
1216
1217 case 246: /* dcbtst */
1218 if (rd == 0) {
1219 ea = xform_ea(instr, regs, 0);
1220 prefetchw((void *) ea);
1221 }
1222 goto instr_done;
1223
1224 case 278: /* dcbt */
1225 if (rd == 0) {
1226 ea = xform_ea(instr, regs, 0);
1227 prefetch((void *) ea);
1228 }
1229 goto instr_done;
1230
14cf11af 1231 }
0016a4cf 1232 break;
14cf11af 1233 }
1234
1235 /*
1236 * Following cases are for loads and stores, so bail out
1237 * if we're in little-endian mode.
1238 */
1239 if (regs->msr & MSR_LE)
1240 return 0;
1241
1242 /*
1243 * Save register RA in case it's an update form load or store
1244 * and the access faults.
1245 */
1246 old_ra = regs->gpr[ra];
1247
1248 switch (opcode) {
1249 case 31:
1250 u = instr & 0x40;
1251 switch ((instr >> 1) & 0x3ff) {
1252 case 20: /* lwarx */
1253 ea = xform_ea(instr, regs, 0);
1254 if (ea & 3)
1255 break; /* can't handle misaligned */
1256 err = -EFAULT;
1257 if (!address_ok(regs, ea, 4))
1258 goto ldst_done;
1259 err = 0;
1260 __get_user_asmx(val, ea, err, "lwarx");
1261 if (!err)
1262 regs->gpr[rd] = val;
1263 goto ldst_done;
1264
1265 case 150: /* stwcx. */
1266 ea = xform_ea(instr, regs, 0);
1267 if (ea & 3)
1268 break; /* can't handle misaligned */
1269 err = -EFAULT;
1270 if (!address_ok(regs, ea, 4))
1271 goto ldst_done;
1272 err = 0;
1273 __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
1274 if (!err)
1275 regs->ccr = (regs->ccr & 0x0fffffff) |
1276 (cr & 0xe0000000) |
1277 ((regs->xer >> 3) & 0x10000000);
1278 goto ldst_done;
1279
1280#ifdef __powerpc64__
1281 case 84: /* ldarx */
1282 ea = xform_ea(instr, regs, 0);
1283 if (ea & 7)
1284 break; /* can't handle misaligned */
1285 err = -EFAULT;
1286 if (!address_ok(regs, ea, 8))
1287 goto ldst_done;
1288 err = 0;
1289 __get_user_asmx(val, ea, err, "ldarx");
1290 if (!err)
1291 regs->gpr[rd] = val;
1292 goto ldst_done;
1293
1294 case 214: /* stdcx. */
1295 ea = xform_ea(instr, regs, 0);
1296 if (ea & 7)
1297 break; /* can't handle misaligned */
1298 err = -EFAULT;
1299 if (!address_ok(regs, ea, 8))
1300 goto ldst_done;
1301 err = 0;
1302 __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
1303 if (!err)
1304 regs->ccr = (regs->ccr & 0x0fffffff) |
1305 (cr & 0xe0000000) |
1306 ((regs->xer >> 3) & 0x10000000);
1307 goto ldst_done;
1308
1309 case 21: /* ldx */
1310 case 53: /* ldux */
1311 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1312 8, regs);
1313 goto ldst_done;
1314#endif
1315
1316 case 23: /* lwzx */
1317 case 55: /* lwzux */
1318 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1319 4, regs);
1320 goto ldst_done;
1321
1322 case 87: /* lbzx */
1323 case 119: /* lbzux */
1324 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1325 1, regs);
1326 goto ldst_done;
1327
1328#ifdef CONFIG_ALTIVEC
1329 case 103: /* lvx */
1330 case 359: /* lvxl */
1331 if (!(regs->msr & MSR_VEC))
1332 break;
1333 ea = xform_ea(instr, regs, 0);
1334 err = do_vec_load(rd, do_lvx, ea, regs);
1335 goto ldst_done;
1336
1337 case 231: /* stvx */
1338 case 487: /* stvxl */
1339 if (!(regs->msr & MSR_VEC))
1340 break;
1341 ea = xform_ea(instr, regs, 0);
1342 err = do_vec_store(rd, do_stvx, ea, regs);
1343 goto ldst_done;
1344#endif /* CONFIG_ALTIVEC */
1345
1346#ifdef __powerpc64__
1347 case 149: /* stdx */
1348 case 181: /* stdux */
1349 val = regs->gpr[rd];
1350 err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
1351 goto ldst_done;
1352#endif
1353
1354 case 151: /* stwx */
1355 case 183: /* stwux */
1356 val = regs->gpr[rd];
1357 err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
1358 goto ldst_done;
1359
1360 case 215: /* stbx */
1361 case 247: /* stbux */
1362 val = regs->gpr[rd];
1363 err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
1364 goto ldst_done;
1365
1366 case 279: /* lhzx */
1367 case 311: /* lhzux */
1368 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1369 2, regs);
1370 goto ldst_done;
1371
1372#ifdef __powerpc64__
1373 case 341: /* lwax */
1374 case 373: /* lwaux */
1375 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1376 4, regs);
1377 if (!err)
1378 regs->gpr[rd] = (signed int) regs->gpr[rd];
1379 goto ldst_done;
1380#endif
1381
1382 case 343: /* lhax */
1383 case 375: /* lhaux */
1384 err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
1385 2, regs);
1386 if (!err)
1387 regs->gpr[rd] = (signed short) regs->gpr[rd];
1388 goto ldst_done;
1389
1390 case 407: /* sthx */
1391 case 439: /* sthux */
1392 val = regs->gpr[rd];
1393 err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
1394 goto ldst_done;
1395
1396#ifdef __powerpc64__
1397 case 532: /* ldbrx */
1398 err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
1399 if (!err)
1400 regs->gpr[rd] = byterev_8(val);
1401 goto ldst_done;
1402
1403#endif
1404
1405 case 534: /* lwbrx */
1406 err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
1407 if (!err)
1408 regs->gpr[rd] = byterev_4(val);
1409 goto ldst_done;
1410
485fafb3 1411#ifdef CONFIG_PPC_FPU
1412 case 535: /* lfsx */
1413 case 567: /* lfsux */
1414 if (!(regs->msr & MSR_FP))
1415 break;
1416 ea = xform_ea(instr, regs, u);
1417 err = do_fp_load(rd, do_lfs, ea, 4, regs);
1418 goto ldst_done;
1419
1420 case 599: /* lfdx */
1421 case 631: /* lfdux */
1422 if (!(regs->msr & MSR_FP))
1423 break;
1424 ea = xform_ea(instr, regs, u);
1425 err = do_fp_load(rd, do_lfd, ea, 8, regs);
1426 goto ldst_done;
1427
1428 case 663: /* stfsx */
1429 case 695: /* stfsux */
1430 if (!(regs->msr & MSR_FP))
1431 break;
1432 ea = xform_ea(instr, regs, u);
1433 err = do_fp_store(rd, do_stfs, ea, 4, regs);
1434 goto ldst_done;
1435
1436 case 727: /* stfdx */
1437 case 759: /* stfdux */
1438 if (!(regs->msr & MSR_FP))
1439 break;
1440 ea = xform_ea(instr, regs, u);
1441 err = do_fp_store(rd, do_stfd, ea, 8, regs);
1442 goto ldst_done;
cd64d169 1443#endif
1444
1445#ifdef __powerpc64__
1446 case 660: /* stdbrx */
1447 val = byterev_8(regs->gpr[rd]);
1448 err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
1449 goto ldst_done;
1450
1451#endif
1452 case 662: /* stwbrx */
1453 val = byterev_4(regs->gpr[rd]);
1454 err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
1455 goto ldst_done;
1456
1457 case 790: /* lhbrx */
1458 err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
1459 if (!err)
1460 regs->gpr[rd] = byterev_2(val);
1461 goto ldst_done;
1462
1463 case 918: /* sthbrx */
1464 val = byterev_2(regs->gpr[rd]);
1465 err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
1466 goto ldst_done;
1467
1468#ifdef CONFIG_VSX
1469 case 844: /* lxvd2x */
1470 case 876: /* lxvd2ux */
1471 if (!(regs->msr & MSR_VSX))
1472 break;
1473 rd |= (instr & 1) << 5;
1474 ea = xform_ea(instr, regs, u);
1475 err = do_vsx_load(rd, do_lxvd2x, ea, regs);
1476 goto ldst_done;
1477
1478 case 972: /* stxvd2x */
1479 case 1004: /* stxvd2ux */
1480 if (!(regs->msr & MSR_VSX))
1481 break;
1482 rd |= (instr & 1) << 5;
1483 ea = xform_ea(instr, regs, u);
1484 err = do_vsx_store(rd, do_stxvd2x, ea, regs);
1485 goto ldst_done;
1486
1487#endif /* CONFIG_VSX */
1488 }
1489 break;
1490
1491 case 32: /* lwz */
1492 case 33: /* lwzu */
1493 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
1494 goto ldst_done;
1495
1496 case 34: /* lbz */
1497 case 35: /* lbzu */
1498 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
1499 goto ldst_done;
1500
1501 case 36: /* stw */
1502 val = regs->gpr[rd];
1503 err = write_mem(val, dform_ea(instr, regs), 4, regs);
1504 goto ldst_done;
1505
1506 case 37: /* stwu */
1507 val = regs->gpr[rd];
1508 val3 = dform_ea(instr, regs);
1509 /*
1510 * For PPC32 we always use stwu to change stack point with r1. So
1511 * this emulated store may corrupt the exception frame, now we
1512 * have to provide the exception frame trampoline, which is pushed
1513 * below the kprobed function stack. So we only update gpr[1] but
1514 * don't emulate the real store operation. We will do real store
1515 * operation safely in exception return code by checking this flag.
1516 */
1517 if ((ra == 1) && !(regs->msr & MSR_PR) \
1518 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1519 /*
1520 * Check if we will touch kernel sack overflow
1521 */
1522 if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
1523 printk(KERN_CRIT "Can't kprobe this since Kernel stack overflow.\n");
1524 err = -EINVAL;
1525 break;
1526 }
1527
1528 /*
1529 * Check if we already set since that means we'll
1530 * lose the previous value.
1531 */
1532 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
1533 set_thread_flag(TIF_EMULATE_STACK_STORE);
1534 err = 0;
1535 } else
1536 err = write_mem(val, val3, 4, regs);
1537 goto ldst_done;
1538
1539 case 38: /* stb */
1540 case 39: /* stbu */
1541 val = regs->gpr[rd];
1542 err = write_mem(val, dform_ea(instr, regs), 1, regs);
1543 goto ldst_done;
1544
1545 case 40: /* lhz */
1546 case 41: /* lhzu */
1547 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
1548 goto ldst_done;
1549
1550 case 42: /* lha */
1551 case 43: /* lhau */
1552 err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
1553 if (!err)
1554 regs->gpr[rd] = (signed short) regs->gpr[rd];
1555 goto ldst_done;
1556
1557 case 44: /* sth */
1558 case 45: /* sthu */
1559 val = regs->gpr[rd];
1560 err = write_mem(val, dform_ea(instr, regs), 2, regs);
1561 goto ldst_done;
1562
1563 case 46: /* lmw */
1564 ra = (instr >> 16) & 0x1f;
1565 if (ra >= rd)
1566 break; /* invalid form, ra in range to load */
1567 ea = dform_ea(instr, regs);
1568 do {
1569 err = read_mem(&regs->gpr[rd], ea, 4, regs);
1570 if (err)
1571 return 0;
1572 ea += 4;
1573 } while (++rd < 32);
1574 goto instr_done;
1575
1576 case 47: /* stmw */
1577 ea = dform_ea(instr, regs);
1578 do {
1579 err = write_mem(regs->gpr[rd], ea, 4, regs);
1580 if (err)
1581 return 0;
1582 ea += 4;
1583 } while (++rd < 32);
1584 goto instr_done;
1585
cd64d169 1586#ifdef CONFIG_PPC_FPU
1587 case 48: /* lfs */
1588 case 49: /* lfsu */
1589 if (!(regs->msr & MSR_FP))
1590 break;
1591 ea = dform_ea(instr, regs);
1592 err = do_fp_load(rd, do_lfs, ea, 4, regs);
1593 goto ldst_done;
1594
1595 case 50: /* lfd */
1596 case 51: /* lfdu */
1597 if (!(regs->msr & MSR_FP))
1598 break;
1599 ea = dform_ea(instr, regs);
1600 err = do_fp_load(rd, do_lfd, ea, 8, regs);
1601 goto ldst_done;
1602
1603 case 52: /* stfs */
1604 case 53: /* stfsu */
1605 if (!(regs->msr & MSR_FP))
1606 break;
1607 ea = dform_ea(instr, regs);
1608 err = do_fp_store(rd, do_stfs, ea, 4, regs);
1609 goto ldst_done;
1610
1611 case 54: /* stfd */
1612 case 55: /* stfdu */
1613 if (!(regs->msr & MSR_FP))
1614 break;
1615 ea = dform_ea(instr, regs);
1616 err = do_fp_store(rd, do_stfd, ea, 8, regs);
1617 goto ldst_done;
cd64d169 1618#endif
1619
1620#ifdef __powerpc64__
1621 case 58: /* ld[u], lwa */
1622 switch (instr & 3) {
1623 case 0: /* ld */
1624 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1625 8, regs);
1626 goto ldst_done;
1627 case 1: /* ldu */
1628 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1629 8, regs);
1630 goto ldst_done;
1631 case 2: /* lwa */
1632 err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
1633 4, regs);
1634 if (!err)
1635 regs->gpr[rd] = (signed int) regs->gpr[rd];
1636 goto ldst_done;
1637 }
1638 break;
1639
1640 case 62: /* std[u] */
1641 val = regs->gpr[rd];
1642 switch (instr & 3) {
1643 case 0: /* std */
1644 err = write_mem(val, dsform_ea(instr, regs), 8, regs);
1645 goto ldst_done;
1646 case 1: /* stdu */
1647 err = write_mem(val, dsform_ea(instr, regs), 8, regs);
1648 goto ldst_done;
1649 }
1650 break;
1651#endif /* __powerpc64__ */
1652
1653 }
1654 err = -EINVAL;
1655
1656 ldst_done:
1657 if (err) {
1658 regs->gpr[ra] = old_ra;
1659 return 0; /* invoke DSI if -EFAULT? */
1660 }
1661 instr_done:
b91e136c 1662 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
1663 return 1;
1664
1665 logical_done:
1666 if (instr & 1)
1667 set_cr0(regs, ra);
1668 goto instr_done;
1669
1670 arith_done:
1671 if (instr & 1)
1672 set_cr0(regs, rd);
1673 goto instr_done;
14cf11af 1674}