// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 0;

static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}
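
/*
 * Example of the resulting oops line (illustrative values only):
 *
 *	Code: aa0003e0 d63f0020 a94153f3 (d4210000)
 *
 * The four instructions preceding the faulting one are printed, with the
 * instruction at the PC shown in parentheses.
 */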

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)
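
/*
 * The AArch32 ITSTATE field is split in the saved PSTATE: IT[1:0] lives
 * in bits [26:25] and IT[7:2] in bits [15:10]. The two helpers below
 * reassemble and split the 8-bit value accordingly.
 */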

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it  = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif
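
/*
 * ITSTATE layout note (per the Arm ARM): bits [7:5] hold the base
 * condition and bits [4:0] a mask with a trailing 1 marking the end of
 * the block. If the low three bits are clear, the current instruction
 * was the last one in the block and the state is wiped; otherwise the
 * mask shifts left by one while the base condition is preserved.
 */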

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
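
/*
 * Undefined-instruction hooks: an emulation handler can claim specific
 * encodings (e.g. deprecated AArch32 instructions) by registering an
 * undef_hook; call_undef_hook() below matches registered hooks against
 * the faulting instruction and PSTATE.
 */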

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
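
/*
 * Illustrative example (not part of this file): armv8_deprecated-style
 * emulation of the AArch32 SWP instruction would register a hook like:
 *
 *	static struct undef_hook swp_hook = {
 *		.instr_mask	= 0x0fb00ff0,
 *		.instr_val	= 0x01000090,
 *		.pstate_mask	= PSR_AA32_MODE_MASK,
 *		.pstate_val	= PSR_AA32_MODE_USR,
 *		.fn		= swp_handler,
 *	};
 *	register_undef_hook(&swp_hook);
 */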

static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
			(regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);

void do_bti(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);

void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
{
	/*
	 * Unexpected FPAC exception or pointer authentication failure in
	 * the kernel: kill the task before it does any more harm.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);

#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
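
/*
 * Fixup mechanics: if the cache-maintenance instruction at label 1
 * faults, the exception-table entry redirects execution to label 3,
 * which loads -EFAULT into 'res' and branches back to label 2 so that
 * execution resumes after the asm block.
 */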

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
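
/*
 * Context note (assumption, not from this file): the two handlers below
 * emulate EL0 reads of the virtual counter and counter frequency when
 * direct access has been disabled, e.g. via CNTKCTL_EL1.EL0VCTEN for
 * errata workarounds such as Cortex-A76 #1418040, so the trapped reads
 * land here instead.
 */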

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
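
/*
 * Note: the hook tables in this file are scanned linearly by
 * do_sysinstr()/do_cp15instr() and must remain terminated by an empty
 * entry -- the hook->handler test in those loops relies on it.
 */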

#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
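
/*
 * For example, an AArch32 "mrrc p15, 1, <rt>, <rt2>, c14" (a CNTVCT
 * read) traps with EC == ESR_ELx_EC_CP15_64 and is emulated by the
 * handler above when direct counter access is disabled for EL0.
 */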

void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif

void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	local_daif_mask();
	panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
{
	arm64_enter_nmi(regs);

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	arm64_exit_nmi(regs);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM, so the
	 * answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
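
/*
 * As the macros above encode, the compiler packs the access details into
 * the BRK immediate: bit 5 selects recoverable reports, bit 4 write
 * accesses, and the low four bits hold log2 of the access size, so e.g.
 * KASAN_ESR_SIZE(0x3) == 8 bytes.
 */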

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation lets us control whether we can proceed after
	 * a crash is detected. This is done by passing the -recover flag to
	 * the compiler. Disabling recovery allows the compiler to generate
	 * more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}