/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef __powerpc64__
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
	unsigned int bo = (instr >> 21) & 0x1f;
		/* decrement counter */
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))

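/*
 * Note on the BO/BI encoding used above (per the Power ISA): bit 0x04 of BO
 * clear means "decrement CTR and test it", bit 0x10 clear means "test the CR
 * bit selected by BI".  For example, a beq on cr0 is encoded with BO = 0x0c
 * (no CTR test, branch if the CR bit is set) and BI = 2 (the EQ bit of CR0).
 */
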
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
	return __access_ok(ea, nb, USER_DS);

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
		if (instr & 0x04000000)		/* update forms */
	if (!(regs->msr & MSR_SF))

/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
		if ((instr & 3) == 1)		/* update forms */
	if (!(regs->msr & MSR_SF))
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	if (do_update)		/* update forms */
	if (!(regs->msr & MSR_SF))

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */

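/*
 * For example, on a 64-bit kernel max_align(0x1000) == 8, max_align(0x1006)
 * == 2 and max_align(0x1001) == 1, so an access can always be split into
 * naturally aligned pieces of this size.
 */
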
static inline unsigned long byterev_2(unsigned long x)
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);

static inline unsigned long byterev_4(unsigned long x)
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);

static inline unsigned long byterev_8(unsigned long x)
	return (byterev_4(x) << 32) | byterev_4(x >> 32);

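/* e.g. byterev_4(0x12345678) == 0x78563412; byterev_8 swaps all eight bytes. */
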
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
		err = __get_user(x, (unsigned char __user *) ea);
		err = __get_user(x, (unsigned short __user *) ea);
		err = __get_user(x, (unsigned int __user *) ea);
		err = __get_user(x, (unsigned long __user *) ea);

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
				      int nb, struct pt_regs *regs)
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	for (; nb > 0; nb -= c) {
		err = read_mem_aligned(&b, ea, c);
		x = (x << (8 * c)) + b;

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
	if (!address_ok(regs, ea, nb))
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);

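/*
 * So a 4-byte read from an address with (ea & 3) == 0 goes through a single
 * __get_user(), while a misaligned one is assembled from smaller naturally
 * aligned pieces by read_mem_unaligned().
 */
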
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
		err = __put_user(val, (unsigned char __user *) ea);
		err = __put_user(val, (unsigned short __user *) ea);
		err = __put_user(val, (unsigned int __user *) ea);
		err = __put_user(val, (unsigned long __user *) ea);

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
	if (!address_ok(regs, ea, nb))
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);

/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
	unsigned long val[sizeof(double) / sizeof(long)];

	if (!address_ok(regs, ea, nb))
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	return (*func)(rn, ptr);

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
	unsigned long val[sizeof(double) / sizeof(long)];

	if (!address_ok(regs, ea, nb))
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		err = write_mem_unaligned(val[0], ea, nb, regs);
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		err = write_mem_unaligned(val[0], ea, 4, regs);
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
	if (!address_ok(regs, ea & ~0xfUL, 16))
	return (*func)(rn, ea);

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
	if (!address_ok(regs, ea & ~0xfUL, 16))
	return (*func)(rn, ea);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
		err = (*func)(rn, (unsigned long) &val[0]);

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	err = write_mem_unaligned(val[0], ea, 8, regs);
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
#endif /* CONFIG_VSX */

#define __put_user_asmx(x, addr, err, op, cr)	\
	__asm__ __volatile__(			\
		"1: " op " %2,0,%3\n"		\
		".section .fixup,\"ax\"\n"	\
		".section __ex_table,\"a\"\n"	\
			PPC_LONG_ALIGN "\n"	\
		: "=r" (err), "=r" (cr)		\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)	\
	__asm__ __volatile__(			\
		"1: "op" %1,0,%2\n"		\
		".section .fixup,\"ax\"\n"	\
		".section __ex_table,\"a\"\n"	\
			PPC_LONG_ALIGN "\n"	\
		: "=r" (err), "=r" (x)		\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)	\
	__asm__ __volatile__(			\
		".section .fixup,\"ax\"\n"	\
		".section __ex_table,\"a\"\n"	\
			PPC_LONG_ALIGN "\n"	\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
	if (!(regs->msr & MSR_SF))
		regs->ccr |= 0x80000000;
		regs->ccr |= 0x40000000;
		regs->ccr |= 0x20000000;

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
	unsigned long val = val1 + val2;

	if (!(regs->msr & MSR_SF)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	if (val < val1 || (carry_in && val == val1))
		regs->xer &= ~XER_CA;

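/*
 * Unsigned addition wrapped around (generated a carry) exactly when the
 * truncated sum is less than one of the addends, or equal to it when a
 * carry came in; that is the test used above to decide whether XER[CA]
 * should be set or cleared.
 */
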
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);

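/*
 * In both compare helpers crval is the 4-bit CR field value, with 8 = LT,
 * 4 = GT, 2 = EQ and 1 = SO (copied from XER[SO]); it is shifted into place
 * in regs->ccr according to the crfD field of the instruction.
 */
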
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

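/*
 * For example, MASK32(27, 30) evaluates to 0x0000001e, i.e. big-endian bits
 * 27..30 set; the "+ ((me) >= (mb))" term also makes the wrapped (me < mb)
 * masks of the rotate instructions come out right.  ROTATE() special-cases
 * n == 0 because shifting by the full word width is undefined in C.
 */
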
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	unsigned long old_ra;

	opcode = instr >> 26;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
		if ((regs->msr & MSR_SF) == 0)
			regs->nip &= 0xffffffffUL;
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
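		/*
		 * The 0x1ebe value tested below appears to be the magic the
		 * 64-bit syscall entry path uses for its "switch endian"
		 * fast path on CPU_FTR_REAL_LE parts; the assignments that
		 * follow mimic the register state system_call_common
		 * expects on entry (r9 = r13, r10 = MSR_KERNEL, r11 =
		 * return address, r12 = saved MSR, r13 = PACA pointer).
		 */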
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;

		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
		if ((instr & 2) == 0)
			regs->link = regs->nip + 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->link &= 0xffffffffUL;
		if ((regs->msr & MSR_SF) == 0)
		switch ((instr >> 1) & 0x3ff) {
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			if ((regs->msr & MSR_SF) == 0) {
				regs->nip &= 0xffffffffUL;
			regs->link = regs->nip;
			if (branch_taken(instr, regs))

		case 18:	/* rfid, scary */

		case 150:	/* isync */

		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
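			/*
			 * Bits 6-9 of the instruction word give the 2-input
			 * truth table of the CR logical op, indexed by the
			 * two source CR bits; e.g. for crand only the (1,1)
			 * entry of the table is 1.
			 */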
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |

		switch ((instr >> 1) & 0x3ff) {
			switch ((instr >> 21) & 3) {
				asm volatile("lwsync" : : : "memory");
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");

		case 854:	/* eieio */

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		imm = (unsigned short) instr;
			val = (unsigned int) val;
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		do_cmp_signed(regs, val, imm, rd >> 2);
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);

	case 13:	/* addic. */
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
			imm += regs->gpr[ra];
		imm = ((short) instr) << 16;
			imm += regs->gpr[ra];

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);

		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);

		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
				regs->gpr[ra] = val & MASK64_L(mb);
				regs->gpr[ra] = val & MASK64_R(mb);
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
				regs->gpr[ra] = val & MASK64_L(mb);
				regs->gpr[ra] = val & MASK64_R(mb);

		switch ((instr >> 1) & 0x3ff) {
			if (regs->msr & MSR_PR)
			regs->gpr[rd] = regs->msr & MSR_MASK;

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */

		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */

			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;

		case 144:	/* mtcrf */
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |

		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];

/*
 * Compare instructions
 */
			val2 = regs->gpr[rb];
				/* word (32-bit) compare */
			do_cmp_signed(regs, val, val2, rd >> 2);

			val2 = regs->gpr[rb];
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			do_cmp_unsigned(regs, val, val2, rd >> 2);

/*
 * Arithmetic instructions
 */
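		/*
		 * The subtract cases below rely on the identity
		 * RB - RA = ~RA + RB + 1, so they can share add_with_carry(),
		 * whose XER[CA] result then has the usual "carry means no
		 * borrow" meaning.
		 */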
			add_with_carry(regs, rd, ~regs->gpr[ra],
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			add_with_carry(regs, rd, regs->gpr[ra],

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			regs->gpr[rd] = -regs->gpr[ra];

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];

		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];

		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];

		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];

/*
 * Shift instructions
 */
			sh = regs->gpr[rb] & 0x3f;
			regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			sh = regs->gpr[rb] & 0x3f;
			regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
				regs->xer &= ~XER_CA;

		case 824:	/* srawi */
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
				regs->xer &= ~XER_CA;
#ifdef __powerpc64__
			sh = regs->gpr[rb] & 0x7f;
			regs->gpr[ra] = regs->gpr[rd] << sh;
			sh = regs->gpr[rb] & 0x7f;
			regs->gpr[ra] = regs->gpr[rd] >> sh;
		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
				regs->xer &= ~XER_CA;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
				regs->xer &= ~XER_CA;
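			/*
			 * For these algebraic right shifts, XER[CA] is set
			 * only when the source was negative and one bits
			 * were shifted out, which is what lets sra followed
			 * by addze round a division toward zero.
			 */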
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
			__cacheop_user_asmx(ea, err, "dcbst");
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
			__cacheop_user_asmx(ea, err, "dcbf");

		case 246:	/* dcbtst */
			ea = xform_ea(instr, regs, 0);
			prefetchw((void *) ea);

		case 278:	/* dcbt */
			ea = xform_ea(instr, regs, 0);
			prefetch((void *) ea);

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
				break;		/* can't handle misaligned */
			if (!address_ok(regs, ea, 4))
			__get_user_asmx(val, ea, err, "lwarx");
				regs->gpr[rd] = val;

		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
				break;		/* can't handle misaligned */
			if (!address_ok(regs, ea, 4))
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			regs->ccr = (regs->ccr & 0x0fffffff) |
				((regs->xer >> 3) & 0x10000000);
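			/*
			 * The larx/stcx. forms are carried out by executing
			 * the same instruction on the user address via
			 * __get_user_asmx()/__put_user_asmx(); the
			 * reservation itself is not modelled.  CR0 is then
			 * rebuilt from the cr value the asm returns plus the
			 * current XER[SO] bit.
			 */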
#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
				break;		/* can't handle misaligned */
			if (!address_ok(regs, ea, 8))
			__get_user_asmx(val, ea, err, "ldarx");
				regs->gpr[rd] = val;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
				break;		/* can't handle misaligned */
			if (!address_ok(regs, ea, 8))
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			regs->ccr = (regs->ccr & 0x0fffffff) |
				((regs->xer >> 3) & 0x10000000);

			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),

		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),

		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),

#ifdef CONFIG_ALTIVEC
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
			regs->gpr[rd] = (signed int) regs->gpr[rd];

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
			regs->gpr[rd] = (signed short) regs->gpr[rd];

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
				regs->gpr[rd] = byterev_8(val);

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
				regs->gpr[rd] = byterev_4(val);

		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);

		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
				regs->gpr[rd] = byterev_2(val);

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
#endif /* CONFIG_VSX */

		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		regs->gpr[rd] = (signed short) regs->gpr[rd];
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);

		ra = (instr >> 16) & 0x1f;
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
		} while (++rd < 32);

		ea = dform_ea(instr, regs);
			err = write_mem(regs->gpr[rd], ea, 4, regs);
		} while (++rd < 32);

		if (!(regs->msr & MSR_FP))
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		if (!(regs->msr & MSR_FP))
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);

	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);

	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
			regs->gpr[rd] = (signed int) regs->gpr[rd];

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
#endif /* __powerpc64__ */

		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */

#ifdef __powerpc64__
	if ((regs->msr & MSR_SF) == 0)
		regs->nip &= 0xffffffffUL;