/*
 *  linux/arch/m32r/kernel/traps.c
 *
 *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                            Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/cpu.h>

#include <asm/page.h>
#include <asm/processor.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>

#include <asm/smp.h>

#include <linux/module.h>

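/* Trap entry points; the low-level handlers live in entry.S. */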
asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * Boot vector for the application processors (APs): an AP woken by the
 * boot IPI enters at _AP_EI and branches to startup_AP.
 */
asm (
        "       .section .eit_vector4,\"ax\"    \n"
        "       .global _AP_RE                  \n"
        "       .global startup_AP              \n"
        "_AP_RE:                                \n"
        "       .fill 32, 4, 0                  \n"
        "_AP_EI: bra    startup_AP              \n"
        "       .previous                       \n"
);
#endif  /* CONFIG_SMP */

extern unsigned long    eit_vector[];
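/*
 * Build an M32R "bra" instruction (opcode 0xff, 24-bit pc-relative
 * word offset) that jumps from EIT vector entry 'entry' to 'func'.
 */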
#define BRA_INSN(func, entry)   \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL

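/*
 * Fill the EIT (Exception, Interrupt, Trap) vector table with branches
 * to the individual handlers, then flush the copy-back cache so the
 * updated vectors are visible to instruction fetch.
 */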
static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = 0;    /* CPU_BOOT_IPI */
        eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
#endif
        _flush_cache_copyback_all();
}

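/*
 * The compiler may emit calls to abort() for paths it considers
 * unreachable; make that a hard failure rather than an undefined symbol.
 */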
void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

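/*
 * Called once during early boot to install the trap vectors and
 * finish per-CPU initialization.
 */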
void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int kstack_depth_to_print = 24;

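/*
 * Walk the stack and print every word that looks like a kernel text
 * address as a backtrace entry.
 */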
static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        printk("[<%08lx>] %pSR\n", addr, (void *)addr);
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n       ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

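/*
 * Dump the registers, the owning process, and, for faults taken in
 * kernel mode, the stack contents and the code bytes at the fault PC.
 */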
static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU:    %d\n", smp_processor_id());
        show_regs(regs);

        sp = (unsigned long)(1 + regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096 + (unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;

                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

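/*
 * Terminal error path: serialize oops output with die_lock, dump the
 * machine state, and kill the current task.
 */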
static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

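/*
 * Common trap dispatch: user-mode traps are converted into signals,
 * kernel-mode traps try an exception fixup and die() if none exists.
 */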
static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}

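/*
 * DO_ERROR() and DO_ERROR_INFO() generate the individual trap handlers;
 * the _INFO variant also fills in a siginfo_t describing the fault.
 */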
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR(1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code was taken from arch/sh/kernel/traps.c */
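/*
 * Unaligned-access fixup: fetch the faulting instruction at bpc and let
 * handle_unaligned_access() emulate it; a user task whose access cannot
 * be fixed up is killed with SIGSEGV.
 */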
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

        uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}