/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d) do { } while (0)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
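
/*
 * For reference, one expansion of the macro above: the line
 * DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
 * below expands (after __stringify) to
 *
 *	asmlinkage void do_illegal_slot_inst(unsigned long error_code,
 *					     struct pt_regs *regs)
 *	{
 *		do_unhandled_exception(13, SIGILL, "illegal slot instruction",
 *				"illegal_slot_inst", error_code, regs, current);
 *	}
 *
 * Note the tsk argument is unused; the expansion always passes current.
 */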

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation: only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
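
/*
 * Worked example of the lookup above, matching the decode performed in
 * do_reserved_inst() below: for a fetched instruction word 'opcode',
 *
 *	major = (opcode >> 26) & 0x3f;	opcode[31:26], selects the entry
 *	minor = (opcode >> 16) & 0xf;	opcode[19:16], selects the bit-pair
 *
 *	state = (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
 *
 * giving one of OPCODE_INVALID / OPCODE_USER_VALID / OPCODE_PRIV_VALID /
 * OPCODE_CTRL_REG for that major/minor combination.
 */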

asmlinkage void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	/* Workaround SH5-101 cut2 silicon defect #2815 :
	   in some situations, inter-mode branches from SHcompact -> SHmedia
	   which should take ITLBMISS or EXECPROT exceptions at the target
	   falsely take RESINST at the target instead. */

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
					case OPCODE_INVALID:
						/* Trap. */
						break;
					case OPCODE_USER_VALID:
						/* Restart the instruction : the branch to the instruction will now be from an RTE
						   not from SHcompact so the silicon defect won't be triggered. */
						return;
					case OPCODE_PRIV_VALID:
						if (!user_mode(regs)) {
							/* Should only ever get here if a module has
							   SHcompact code inside it.  If so, the same fix up is needed. */
							return; /* same reason */
						}
						/* Otherwise, user mode trying to execute a privileged instruction -
						   fall through to trap. */
						break;
					case OPCODE_CTRL_REG:
						/* If in privileged mode, return as above. */
						if (!user_mode(regs))
							return;
						/* In user mode ... */
						if (combined == 0x9f) { /* GETCON */
							unsigned long regno = (opcode >> 20) & 0x3f;
							if (regno >= 62) {
								return;
							}
							/* Otherwise, reserved or privileged control register, => trap */
						} else if (combined == 0x1bf) { /* PUTCON */
							unsigned long regno = (opcode >> 4) & 0x3f;
							if (regno >= 62) {
								return;
							}
							/* Otherwise, reserved or privileged control register, => trap */
						} else {
							/* Trap */
						}
						break;
					default:
						/* Fall through to trap. */
						break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__func__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}

/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can read the
			 * opcode directly; if that read itself faults, it's
			 * a genuine bug.
			 */
			*result_opcode = *(unsigned long *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
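
/*
 * Illustrative values for the check above (assuming NEFF == 32): an
 * address like 0xffffffff80001000 is correctly sign-extended from bit 31
 * and passes, whereas 0x0000000180001000 is not (bits above 31 disagree
 * with bit 31) and is rejected.
 */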

static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}
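
/*
 * A worked example of the displacement form above (hypothetical register
 * contents, for illustration only): with width_shift == 2 (a 32-bit
 * access), a 10-bit displacement field of 0x3ff sign-extends to -1 via
 * (d << 54) >> 54, and is then scaled by the access width, so
 *
 *	addr = base_address + (-1 << 2) = base_address - 4
 */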

/* Default value as for sh */
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

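		/*
		 * The ldlo/ldhi pairs below are SHmedia's partial-word
		 * loads: each reads only the bytes of the misaligned
		 * operand that fall within one aligned word (ldlo from
		 * the aligned word at 'address', ldhi from the aligned
		 * word covering the last byte, at address+3 or +7), so
		 * OR-ing the two partial results reconstructs the value
		 * without ever issuing a misaligned access.
		 */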
		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current, regs);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32 *) &buffer;
		bufhi = *(1 + (__u32 *) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current, regs);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32 *) &buffer = buflo;
		*(1 + (__u32 *) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
#endif

static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Without this option, treat any misaligned access as a fault (note
	   that this also bypasses the kernel-mode fixup path below). */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable)
		return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}

	switch (major) {
		case (0x84>>2): /* LD.W */
			error = misaligned_load(regs, opcode, 1, 1, 1);
			break;
		case (0xb0>>2): /* LD.UW */
			error = misaligned_load(regs, opcode, 1, 1, 0);
			break;
		case (0x88>>2): /* LD.L */
			error = misaligned_load(regs, opcode, 1, 2, 1);
			break;
		case (0x8c>>2): /* LD.Q */
			error = misaligned_load(regs, opcode, 1, 3, 0);
			break;

		case (0xa4>>2): /* ST.W */
			error = misaligned_store(regs, opcode, 1, 1);
			break;
		case (0xa8>>2): /* ST.L */
			error = misaligned_store(regs, opcode, 1, 2);
			break;
		case (0xac>>2): /* ST.Q */
			error = misaligned_store(regs, opcode, 1, 3);
			break;

		case (0x40>>2): /* indexed loads */
			switch (minor) {
				case 0x1: /* LDX.W */
					error = misaligned_load(regs, opcode, 0, 1, 1);
					break;
				case 0x5: /* LDX.UW */
					error = misaligned_load(regs, opcode, 0, 1, 0);
					break;
				case 0x2: /* LDX.L */
					error = misaligned_load(regs, opcode, 0, 2, 1);
					break;
				case 0x3: /* LDX.Q */
					error = misaligned_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;

		case (0x60>>2): /* indexed stores */
			switch (minor) {
				case 0x1: /* STX.W */
					error = misaligned_store(regs, opcode, 0, 1);
					break;
				case 0x2: /* STX.L */
					error = misaligned_store(regs, opcode, 0, 2);
					break;
				case 0x3: /* STX.Q */
					error = misaligned_store(regs, opcode, 0, 3);
					break;
				default:
					error = -1;
					break;
			}
			break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
		case (0x94>>2): /* FLD.S */
			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
			break;
		case (0x98>>2): /* FLD.P */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
			break;
		case (0x9c>>2): /* FLD.D */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
			break;
		case (0x1c>>2): /* floating indexed loads */
			switch (minor) {
			case 0x8: /* FLDX.S */
				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FLDX.P */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FLDX.D */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
		case (0xb4>>2): /* FST.S */
			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
			break;
		case (0xb8>>2): /* FST.P */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
			break;
		case (0xbc>>2): /* FST.D */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
			break;
		case (0x3c>>2): /* floating indexed stores */
			switch (minor) {
			case 0x8: /* FSTX.S */
				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FSTX.P */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FSTX.D */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
#endif

		default:
			/* Fault */
			error = -1;
			break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}
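
/*
 * Flow recap: do_address_error_load()/do_address_error_store() above call
 * misaligned_fixup() first; on success the access has been emulated and
 * regs->pc has been stepped past the instruction, so returning from the
 * trap resumes at the next instruction.  On failure (-1) the callers
 * raise the "address error" signal via do_unhandled_exception().
 */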

static ctl_table unaligned_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#endif
	{}
};

static ctl_table unaligned_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

static ctl_table sh64_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};

static struct ctl_table_header *sysctl_header;

static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}
__initcall(init_sysctl);
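
/*
 * The tables above register under /proc/sys/sh64/, giving
 * /proc/sys/sh64/unaligned_fixup/kernel_reports and, when
 * CONFIG_SH64_USER_MISALIGNED_FIXUP is set, user_reports and user_enable
 * alongside it.  For example (illustrative shell usage):
 *
 *	echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable
 *
 * disables user-mode fixup, so such accesses signal instead of being
 * emulated.
 */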

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}