// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}

static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        if (user_mode(regs))
                return;

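        /*
         * Dump the four instructions preceding the PC together with the
         * instruction at the PC itself, which is parenthesised.
         */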
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }

        printk("%sCode: %s\n", lvl, str);
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip = 0;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs) {
                if (user_mode(regs))
                        return;
                skip = 1;
        }

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                start_backtrace(&frame,
                                (unsigned long)__builtin_frame_address(0),
                                (unsigned long)dump_backtrace);
        } else {
                /*
                 * task blocked in __switch_to
                 */
                start_backtrace(&frame,
                                thread_saved_fp(tsk),
                                thread_saved_pc(tsk));
        }

        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Mostly, this is the case where this function is
                         * called in panic/abort. As the exception handler's
                         * stack frame does not contain the pc at which the
                         * exception was taken, use regs->pc instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        show_regs(regs);

        dump_kernel_instr(KERN_EMERG, regs);

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct task_struct *tsk = current;
        unsigned int esr = tsk->thread.fault_code;
        struct pt_regs *regs = task_pt_regs(tsk);

        /* Leave if the signal won't be shown */
        if (!show_unhandled_signals ||
            !unhandled_signal(tsk, signo) ||
            !__ratelimit(&rs))
                return;

        pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
        if (esr)
                pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

        pr_cont("%s", str);
        print_vma_addr(KERN_CONT " in ", regs->pc);
        pr_cont("\n");
        __show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
                           const char *str)
{
        arm64_show_signal(signo, str);
        if (signo == SIGKILL)
                force_sig(SIGKILL);
        else
                force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
                            const char *str)
{
        arm64_show_signal(SIGBUS, str);
        force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
                                       const char *str)
{
        arm64_show_signal(SIGTRAP, str);
        force_sig_ptrace_errno_trap(errno, addr);
}

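/*
 * Route the failure: exceptions taken from EL0 result in a signal to the
 * task, while exceptions taken from the kernel are fatal and go via die().
 */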
void arm64_notify_die(const char *str, struct pt_regs *regs,
                      int signo, int sicode, void __user *addr,
                      int err)
{
        if (user_mode(regs)) {
                WARN_ON(regs != current_pt_regs());
                current->thread.fault_address = 0;
                current->thread.fault_code = err;

                arm64_force_sig_fault(signo, sicode, addr, str);
        } else {
                die(str, regs, err);
        }
}

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

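/*
 * Undef hooks allow other kernel code (e.g. the deprecated AArch32
 * instruction emulation) to claim and handle instructions that would
 * otherwise be treated as undefined.
 */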
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

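        /*
         * Instruction encodings are always stored little-endian, so convert
         * the fetched value regardless of kernel endianness.
         */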
        if (!user_mode(regs)) {
                __le32 instr_le;
                if (probe_kernel_address((__force __le32 *)pc, instr_le))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        } else if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                        (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;

        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
        const char *desc;
        struct pt_regs *regs = current_pt_regs();

        if (WARN_ON(!user_mode(regs)))
                return;

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "unknown or unrecoverable error";
                break;
        }

        /* Force signals we don't understand to SIGKILL */
        if (WARN_ON(signal != SIGKILL &&
                    siginfo_layout(signal, code) != SIL_FAULT)) {
                signal = SIGKILL;
        }

        arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        BUG_ON(!user_mode(regs));
        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

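/*
 * Perform a cache maintenance instruction on a user address, storing 0 or
 * -EFAULT in @res: a fault at label 1 is redirected by the exception table
 * entry to the fixup code at label 3.
 */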
#define __user_cache_maint(insn, address, res)                  \
        if (address >= user_addr_max()) {                       \
                res = -EFAULT;                                  \
        } else {                                                \
                uaccess_ttbr0_enable();                         \
                asm volatile (                                  \
                        "1:     " insn ", %1\n"                 \
                        "       mov     %w0, #0\n"              \
                        "2:\n"                                  \
                        "       .pushsection .fixup,\"ax\"\n"   \
                        "       .align  2\n"                    \
                        "3:     mov     %w0, %w2\n"             \
                        "       b       2b\n"                   \
                        "       .popsection\n"                  \
                        _ASM_EXTABLE(1b, 3b)                    \
                        : "=r" (res)                            \
                        : "r" (address), "i" (-EFAULT));        \
                uaccess_ttbr0_disable();                        \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
                __user_cache_maint("sys 3, c7, c13, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                return;
        }

        if (ret)
                arm64_notify_segfault(address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_read_counter());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
        u32 sysreg, rt;

        rt = ESR_ELx_SYS64_ISS_RT(esr);
        sysreg = esr_sys64_to_sysreg(esr);

        if (do_emulate_mrs(regs, sysreg, rt) != 0)
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {
                /* Trap read access to CPUID registers */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
                .handler = mrs_handler,
        },
        {
                /* Trap WFI instructions executed in userspace */
                .esr_mask = ESR_ELx_WFx_MASK,
                .esr_val = ESR_ELx_WFx_WFI_VAL,
                .handler = wfi_handler,
        },
        {},
};

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT     25
#define PSTATE_IT_1_0_MASK      (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT     10
#define PSTATE_IT_7_2_MASK      (0x3f << PSTATE_IT_7_2_SHIFT)

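/*
 * The AArch32 IT state is split across PSTATE: IT[1:0] live in bits [26:25]
 * and IT[7:2] in bits [15:10]. These helpers reassemble and split it.
 */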
static u32 compat_get_it_state(struct pt_regs *regs)
{
        u32 it, pstate = regs->pstate;

        it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
        it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

        return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
        u32 pstate_it;

        pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
        pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

        regs->pstate &= ~PSR_AA32_IT_MASK;
        regs->pstate |= pstate_it;
}

static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
        int cond;

        /* Only a T32 instruction can trap without CV being set */
        if (!(esr & ESR_ELx_CV)) {
                u32 it;

                it = compat_get_it_state(regs);
                if (!it)
                        return true;

                cond = it >> 4;
        } else {
                cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
        }

        return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void advance_itstate(struct pt_regs *regs)
{
        u32 it;

        /* ARM mode */
        if (!(regs->pstate & PSR_AA32_T_BIT) ||
            !(regs->pstate & PSR_AA32_IT_MASK))
                return;

        it  = compat_get_it_state(regs);

        /*
         * If this is the last instruction of the block, wipe the IT
         * state. Otherwise advance it.
         */
        if (!(it & 7))
                it = 0;
        else
                it = (it & 0xe0) | ((it << 1) & 0x1f);

        compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
                                                   unsigned int sz)
{
        advance_itstate(regs);
        arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, reg, arch_timer_get_rate());
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_32_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
                .handler = compat_cntfrq_read_handler,
        },
        {},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
        int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
        u64 val = arch_timer_read_counter();

        pt_regs_write_reg(regs, rt, lower_32_bits(val));
        pt_regs_write_reg(regs, rt2, upper_32_bits(val));
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_64_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
                .handler = compat_cntvct_read_handler,
        },
        {},
};

asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook, *hook_base;

        if (!cp15_cond_valid(esr, regs)) {
                /*
                 * There is no T16 variant of a CP access, so we
                 * always advance PC by 4 bytes.
                 */
                arm64_compat_skip_faulting_instruction(regs, 4);
                return;
        }

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_CP15_32:
                hook_base = cp15_32_hooks;
                break;
        case ESR_ELx_EC_CP15_64:
                hook_base = cp15_64_hooks;
                break;
        default:
                do_undefinstr(regs);
                return;
        }

        for (hook = hook_base; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New cp15 instructions may previously have been undefined at
         * EL0. Fall back to our usual undefined instruction handler
         * so that we handle these consistently.
         */
        do_undefinstr(regs);
}
#endif

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}

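/*
 * Human-readable strings for each ESR exception class. The range designator
 * first fills every slot with "UNRECOGNIZED EC"; the named entries then
 * override the classes we know about.
 */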
static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        local_daif_mask();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        void __user *pc = (void __user *)instruction_pointer(regs);

        current->thread.fault_address = 0;
        current->thread.fault_code = esr;

        arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
                              "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

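/*
 * With vmapped stacks, an overflowing kernel stack hits the guard page and
 * the entry code switches to this per-CPU overflow stack, giving us enough
 * room to report the failure.
 */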
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!\n");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows,
         * and to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
        console_verbose();

        pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        if (regs)
                __show_regs(regs);

        nmi_panic(regs, "Asynchronous SError Interrupt");

        cpu_park_loop();
        unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
        u32 aet = arm64_ras_serror_get_severity(esr);

        switch (aet) {
        case ESR_ELx_AET_CE:    /* corrected error */
        case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
                /*
                 * The CPU can make progress. We may take UEO again as
                 * a more severe error.
                 */
                return false;

        case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
        case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
                /*
                 * The CPU can't make progress. The exception may have
                 * been imprecise.
                 *
                 * Neoverse-N1 #1349291 means a non-KVM SError reported as
                 * Unrecoverable should be treated as Uncontainable. We
                 * call arm64_serror_panic() in both cases.
                 */
                return true;

        case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
        default:
                /* Error has been silently propagated */
                arm64_serror_panic(regs, esr);
        }
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
        const bool was_in_nmi = in_nmi();

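        /* nmi_enter() can't nest; only enter NMI context if not already there. */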
        if (!was_in_nmi)
                nmi_enter();

        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);

        if (!was_in_nmi)
                nmi_exit();
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM.
         * So the answer is trivial -- any spurious instances with no
         * bug table entry will be rejected by report_bug() and passed
         * back to the debug-monitors code and handled as a fatal
         * unexpected debug exception.
         */
        return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .fn = bug_handler,
        .imm = BUG_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

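/*
 * The compiler encodes the access details in the BRK immediate: bit 5 marks
 * the report as recoverable, bit 4 distinguishes writes from reads, and the
 * low four bits hold log2 of the access size.
 */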
#define KASAN_ESR_RECOVER       0x20
#define KASAN_ESR_WRITE         0x10
#define KASAN_ESR_SIZE_MASK     0x0f
#define KASAN_ESR_SIZE(esr)     (1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
        bool recover = esr & KASAN_ESR_RECOVER;
        bool write = esr & KASAN_ESR_WRITE;
        size_t size = KASAN_ESR_SIZE(esr);
        u64 addr = regs->regs[0];
        u64 pc = regs->pc;

        kasan_report(addr, size, write, pc);

        /*
         * The instrumentation allows control over whether we can proceed
         * after a crash was detected. This is done by passing the -recover
         * flag to the compiler. Disabling recovery lets the compiler
         * generate more compact code.
         *
         * Unfortunately disabling recovery doesn't work for the kernel right
         * now. KASAN reporting is disabled in some contexts (for example when
         * the allocator accesses slab object metadata; this is controlled by
         * current->kasan_depth). All these accesses are detected by the tool,
         * even though the reports for them are not printed.
         *
         * This is something that might be fixed at some point in the future.
         */
        if (!recover)
                die("Oops - KASAN", regs, 0);

        /* If thread survives, skip over the brk instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
        .fn     = kasan_handler,
        .imm    = KASAN_BRK_IMM,
        .mask   = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
        unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

        if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
                return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
        register_kernel_break_hook(&kasan_break_hook);
#endif
}