arch/nds32/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>

#include <linux/ptrace.h>
#include <nds32_intrinsic.h>

extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Dump out the contents of the memory between 'bottom' and 'top' nicely,
 * one line of eight 32-bit words at a time, prefixed with 'lvl'
 * (e.g. "Stack: ").
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;

                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);

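/*
 * Dump the instruction words around the faulting PC (the four words
 * before it plus the word at the PC itself) as a "Code:" line.
 */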
static void dump_instr(struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /* The instruction dump is currently disabled; the code below is not reached. */
        return;
        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        pr_emerg("Code: ");
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = __get_user(val, &((u32 *) addr)[i]);

                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                } else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        pr_emerg("Code: %s\n", str);

        set_fs(fs);
}

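/*
 * Print a bounded call trace starting from 'base_reg'.  Without
 * CONFIG_FRAME_POINTER every word on the stack is scanned and anything
 * that looks like a kernel text address is printed; with frame pointers
 * the fp/lp chain is followed instead.  LOOP_TIMES caps the number of
 * entries so a corrupted stack cannot loop forever.
 */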
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
        unsigned long ret_addr;
        int cnt = LOOP_TIMES, graph = 0;

        pr_emerg("Call Trace:\n");
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                }
        } else {
                while (!kstack_end((void *)base_reg) &&
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;

                        ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                        base_reg = (unsigned long *)next_fp;
                }
        }
        pr_emerg("\n");
}

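/*
 * Pick a starting stack (or frame) pointer for 'tsk' and hand it to
 * __dump().  For the current task the live $sp/$fp register is read;
 * for a sleeping task the value saved in its cpu_context is used.
 * The 'sp' argument is ignored here.
 */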
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        unsigned long *base_reg;

        if (!tsk)
                tsk = current;
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
                else
                        __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        } else {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
                else
                        __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        }
        __dump(tsk, base_reg);
        barrier();
}

DEFINE_SPINLOCK(die_lock);

/*
 * Serialized by die_lock so that concurrent oopses do not interleave
 * their output.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        struct task_struct *tsk = current;
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
                 tsk->comm, tsk->pid, end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }

        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);

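/*
 * Oops only if the fault happened in kernel mode; user-mode faults are
 * left to the signal handling in the caller.
 */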
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
        if (user_mode(regs))
                return;

        die(str, regs, err);
}

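/*
 * Handle a system call that is not recognised.  Non-Linux personalities
 * simply get SIGSEGV; otherwise the task receives SIGILL with ILL_ILLTRP
 * pointing at the trapping syscall instruction, and a kernel-mode caller
 * oopses.
 */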
int bad_syscall(int n, struct pt_regs *regs)
{
        if (current->personality != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->uregs[0];
        }

        force_sig_fault(SIGILL, ILL_ILLTRP,
                        (void __user *)instruction_pointer(regs) - 4, current);
        die_if_kernel("Oops - bad syscall", regs, n);
        return regs->uregs[0];
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}

extern char *exception_vector, *exception_vector_end;

void __init trap_init(void)
{
        /* Nothing to do here; the vectors are set up in early_trap_init(). */
        return;
}

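/*
 * Copy the exception vector table to its run-time location at
 * PAGE_OFFSET, program the IVB register for 16-byte vector entries, set
 * the initial interrupt mask, and write the copied vectors back out of
 * the cache so the CPU fetches the real thing.
 */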
void __init early_trap_init(void)
{
        unsigned long ivb = 0;
        unsigned long base = PAGE_OFFSET;

        memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
               ((unsigned long)&exception_vector_end -
                (unsigned long)&exception_vector));
        ivb = __nds32__mfsr(NDS32_SR_IVB);
        /* Check platform support. */
        if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
                panic("IVIC mode is not allowed on the platform with interrupt controller\n");
        __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
                      IVB_BASE, NDS32_SR_IVB);
        __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

        /*
         * 0x800 = 128 vectors * 16 bytes.
         * It should be enough to flush a page.
         */
        cpu_cache_wbinval_page(base, true);
}

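/*
 * Record the debug-related trap in the thread state and deliver SIGTRAP
 * with the given si_code at the faulting instruction.
 */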
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                  int error_code, int si_code)
{
        tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
        tsk->thread.error_code = error_code;

        force_sig_fault(SIGTRAP, si_code,
                        (void __user *)instruction_pointer(regs), tsk);
}

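/*
 * Debug exception dispatcher: give the die notifier chain a chance to
 * claim the event first, then deliver SIGTRAP for user space or try an
 * exception fixup for the kernel.  An unfixable kernel-mode debug trap
 * is fatal.
 */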
void do_debug_trap(unsigned long entry, unsigned long addr,
                   unsigned long type, struct pt_regs *regs)
{
        if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
            == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                /* trap_signal */
                send_sigtrap(current, regs, 0, TRAP_BRKPT);
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die("unexpected kernel_trap", regs, 0);
        }
}

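/*
 * Fallback handlers for interrupts and exceptions nothing else claimed:
 * dump the registers, then kill the offender (do_exit() for kernel
 * mode, SIGKILL for user space).
 */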
void unhandled_interruption(struct pt_regs *regs)
{
        pr_emerg("unhandled_interruption\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL, current);
}

void unhandled_exceptions(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
                 addr, type);
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL, current);
}

extern int do_page_fault(unsigned long entry, unsigned long addr,
                         unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for TLB MISC exception handler
 */
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
        if ((type & ITYPE_mskETYPE) < 5) {
                /* Permission exceptions */
                do_page_fault(entry, addr, type, regs);
        } else
                unhandled_exceptions(entry, addr, type, regs);
}

void do_revinsn(struct pt_regs *regs)
{
        pr_emerg("Reserved Instruction\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGILL);
        force_sig(SIGILL, current);
}

#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
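
/*
 * General exception dispatcher: decode the interruption type into the
 * software ID (SWID) and exception type, then hand off alignment faults
 * (optionally to the software unaligned-access fixup), reserved
 * instructions, and the v3 EDM debug-trap workaround; anything else is
 * reported as unhandled.
 */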
void do_dispatch_general(unsigned long entry, unsigned long addr,
                         unsigned long itype, struct pt_regs *regs,
                         unsigned long oipc)
{
        unsigned int swid = itype >> ITYPE_offSWID;
        unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);

        if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
                /* Alignment check */
                if (user_mode(regs) && unalign_access_mode) {
                        int ret;

                        ret = do_unaligned_access(addr, regs);
                        if (ret == 0)
                                return;

                        if (ret == -EFAULT)
                                pr_emerg("Unhandled unaligned access exception\n");
                }
#endif
                do_page_fault(entry, addr, type, regs);
        } else if (type == ETYPE_RESERVED_INSTRUCTION) {
                /* Reserved instruction */
                do_revinsn(regs);
        } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
                /* Trap used by the v3 EDM target-debugging workaround. */
                /*
                 * DIPC (OIPC) was passed in as a parameter before
                 * interrupts were enabled, so it cannot have been
                 * corrupted by an incoming interrupt:
                 * 1. restore $IPC from oipc
                 * 2. update pt_regs ipc with oipc
                 * 3. update pt_regs ipsw (clear DEX)
                 */
                __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
                regs->ipc = oipc;
                if (regs->pipsw & PSW_mskDEX) {
                        pr_emerg("Nested Debug exception is possibly happened\n");
                        pr_emerg("ipc:%08x pipc:%08x\n",
                                 (unsigned int)regs->ipc,
                                 (unsigned int)regs->pipc);
                }
                do_debug_trap(entry, addr, itype, regs);
                regs->ipsw &= ~PSW_mskDEX;
        } else
                unhandled_exceptions(entry, addr, type, regs);
}