arch/arm64/kernel/traps.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/kernel/traps.c
4  *
5  * Copyright (C) 1995-2009 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8
9 #include <linux/bug.h>
10 #include <linux/context_tracking.h>
11 #include <linux/signal.h>
12 #include <linux/personality.h>
13 #include <linux/kallsyms.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/uaccess.h>
17 #include <linux/hardirq.h>
18 #include <linux/kdebug.h>
19 #include <linux/module.h>
20 #include <linux/kexec.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/debug.h>
25 #include <linux/sched/task_stack.h>
26 #include <linux/sizes.h>
27 #include <linux/syscalls.h>
28 #include <linux/mm_types.h>
29 #include <linux/kasan.h>
30
31 #include <asm/atomic.h>
32 #include <asm/bug.h>
33 #include <asm/cpufeature.h>
34 #include <asm/daifflags.h>
35 #include <asm/debug-monitors.h>
36 #include <asm/esr.h>
37 #include <asm/exception.h>
38 #include <asm/extable.h>
39 #include <asm/insn.h>
40 #include <asm/kprobes.h>
41 #include <asm/traps.h>
42 #include <asm/smp.h>
43 #include <asm/stack_pointer.h>
44 #include <asm/stacktrace.h>
46 #include <asm/system_misc.h>
47 #include <asm/sysreg.h>
48
49 static const char *handler[] = {
50         "Synchronous Abort",
51         "IRQ",
52         "FIQ",
53         "Error"
54 };
55
56 int show_unhandled_signals = 0;
57
58 static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
59 {
60         unsigned long addr = instruction_pointer(regs);
61         char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
62         int i;
63
64         if (user_mode(regs))
65                 return;
66
67         for (i = -4; i < 1; i++) {
68                 unsigned int val, bad;
69
70                 bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
71
72                 if (!bad)
73                         p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
74                 else {
75                         p += sprintf(p, "bad PC value");
76                         break;
77                 }
78         }
79
80         printk("%sCode: %s\n", lvl, str);
81 }
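/*
 * Illustrative only: with the loop above, a typical oops line looks
 * something like
 *
 *	Code: a9025bf5 f85f03e0 927ef400 f2ffffe0 (d4210000)
 *
 * i.e. the four instructions preceding the faulting PC followed by the
 * faulting instruction itself in parentheses. The opcodes shown here are
 * made up for illustration; the buffer above is sized for exactly five
 * "%08x " words plus the two parentheses and a trailing NUL.
 */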
82
83 #ifdef CONFIG_PREEMPT
84 #define S_PREEMPT " PREEMPT"
85 #elif defined(CONFIG_PREEMPT_RT)
86 #define S_PREEMPT " PREEMPT_RT"
87 #else
88 #define S_PREEMPT ""
89 #endif
90
91 #define S_SMP " SMP"
92
93 static int __die(const char *str, int err, struct pt_regs *regs)
94 {
95         static int die_counter;
96         int ret;
97
98         pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
99                  str, err, ++die_counter);
100
101         /* trap and error numbers are mostly meaningless on ARM */
102         ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
103         if (ret == NOTIFY_STOP)
104                 return ret;
105
106         print_modules();
107         show_regs(regs);
108
109         dump_kernel_instr(KERN_EMERG, regs);
110
111         return ret;
112 }
113
114 static DEFINE_RAW_SPINLOCK(die_lock);
115
116 /*
117  * This function is protected against re-entrancy.
118  */
119 void die(const char *str, struct pt_regs *regs, int err)
120 {
121         int ret;
122         unsigned long flags;
123
124         raw_spin_lock_irqsave(&die_lock, flags);
125
126         oops_enter();
127
128         console_verbose();
129         bust_spinlocks(1);
130         ret = __die(str, err, regs);
131
132         if (regs && kexec_should_crash(current))
133                 crash_kexec(regs);
134
135         bust_spinlocks(0);
136         add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
137         oops_exit();
138
139         if (in_interrupt())
140                 panic("%s: Fatal exception in interrupt", str);
141         if (panic_on_oops)
142                 panic("%s: Fatal exception", str);
143
144         raw_spin_unlock_irqrestore(&die_lock, flags);
145
146         if (ret != NOTIFY_STOP)
147                 do_exit(SIGSEGV);
148 }
149
150 static void arm64_show_signal(int signo, const char *str)
151 {
152         static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
153                                       DEFAULT_RATELIMIT_BURST);
154         struct task_struct *tsk = current;
155         unsigned int esr = tsk->thread.fault_code;
156         struct pt_regs *regs = task_pt_regs(tsk);
157
158         /* Leave if the signal won't be shown */
159         if (!show_unhandled_signals ||
160             !unhandled_signal(tsk, signo) ||
161             !__ratelimit(&rs))
162                 return;
163
164         pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
165         if (esr)
166                 pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
167
168         pr_cont("%s", str);
169         print_vma_addr(KERN_CONT " in ", regs->pc);
170         pr_cont("\n");
171         __show_regs(regs);
172 }
173
174 void arm64_force_sig_fault(int signo, int code, unsigned long far,
175                            const char *str)
176 {
177         arm64_show_signal(signo, str);
178         if (signo == SIGKILL)
179                 force_sig(SIGKILL);
180         else
181                 force_sig_fault(signo, code, (void __user *)far);
182 }
183
184 void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
185                             const char *str)
186 {
187         arm64_show_signal(SIGBUS, str);
188         force_sig_mceerr(code, (void __user *)far, lsb);
189 }
190
191 void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
192                                        const char *str)
193 {
194         arm64_show_signal(SIGTRAP, str);
195         force_sig_ptrace_errno_trap(errno, (void __user *)far);
196 }
197
198 void arm64_notify_die(const char *str, struct pt_regs *regs,
199                       int signo, int sicode, unsigned long far,
200                       int err)
201 {
202         if (user_mode(regs)) {
203                 WARN_ON(regs != current_pt_regs());
204                 current->thread.fault_address = 0;
205                 current->thread.fault_code = err;
206
207                 arm64_force_sig_fault(signo, sicode, far, str);
208         } else {
209                 die(str, regs, err);
210         }
211 }
212
213 #ifdef CONFIG_COMPAT
214 #define PSTATE_IT_1_0_SHIFT     25
215 #define PSTATE_IT_1_0_MASK      (0x3 << PSTATE_IT_1_0_SHIFT)
216 #define PSTATE_IT_7_2_SHIFT     10
217 #define PSTATE_IT_7_2_MASK      (0x3f << PSTATE_IT_7_2_SHIFT)
218
219 static u32 compat_get_it_state(struct pt_regs *regs)
220 {
221         u32 it, pstate = regs->pstate;
222
223         it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
224         it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
225
226         return it;
227 }
228
229 static void compat_set_it_state(struct pt_regs *regs, u32 it)
230 {
231         u32 pstate_it;
232
233         pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
234         pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
235
236         regs->pstate &= ~PSR_AA32_IT_MASK;
237         regs->pstate |= pstate_it;
238 }
239
240 static void advance_itstate(struct pt_regs *regs)
241 {
242         u32 it;
243
244         /* ARM mode */
245         if (!(regs->pstate & PSR_AA32_T_BIT) ||
246             !(regs->pstate & PSR_AA32_IT_MASK))
247                 return;
248
249         it  = compat_get_it_state(regs);
250
251         /*
252          * If this is the last instruction of the block, wipe the IT
253          * state. Otherwise advance it.
254          */
255         if (!(it & 7))
256                 it = 0;
257         else
258                 it = (it & 0xe0) | ((it << 1) & 0x1f);
259
260         compat_set_it_state(regs, it);
261 }
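/*
 * Worked example (illustrative value): with it = 0x64 the low three bits
 * are non-zero (0x64 & 7 == 4), so the state advances to
 * (0x64 & 0xe0) | ((0x64 << 1) & 0x1f) == 0x68. On the next advance
 * 0x68 & 7 == 0, i.e. the current instruction was the last one in the
 * IT block, and the whole state is wiped to 0.
 */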
262 #else
263 static void advance_itstate(struct pt_regs *regs)
264 {
265 }
266 #endif
267
268 void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
269 {
270         regs->pc += size;
271
272         /*
273          * If we were single stepping, we want to get the step exception after
274          * we return from the trap.
275          */
276         if (user_mode(regs))
277                 user_fastforward_single_step(current);
278
279         if (compat_user_mode(regs))
280                 advance_itstate(regs);
281         else
282                 regs->pstate &= ~PSR_BTYPE_MASK;
283 }
284
285 static LIST_HEAD(undef_hook);
286 static DEFINE_RAW_SPINLOCK(undef_lock);
287
288 void register_undef_hook(struct undef_hook *hook)
289 {
290         unsigned long flags;
291
292         raw_spin_lock_irqsave(&undef_lock, flags);
293         list_add(&hook->node, &undef_hook);
294         raw_spin_unlock_irqrestore(&undef_lock, flags);
295 }
296
297 void unregister_undef_hook(struct undef_hook *hook)
298 {
299         unsigned long flags;
300
301         raw_spin_lock_irqsave(&undef_lock, flags);
302         list_del(&hook->node);
303         raw_spin_unlock_irqrestore(&undef_lock, flags);
304 }
305
306 static int call_undef_hook(struct pt_regs *regs)
307 {
308         struct undef_hook *hook;
309         unsigned long flags;
310         u32 instr;
311         int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
312         void __user *pc = (void __user *)instruction_pointer(regs);
313
314         if (!user_mode(regs)) {
315                 __le32 instr_le;
316                 if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
317                         goto exit;
318                 instr = le32_to_cpu(instr_le);
319         } else if (compat_thumb_mode(regs)) {
320                 /* 16-bit Thumb instruction */
321                 __le16 instr_le;
322                 if (get_user(instr_le, (__le16 __user *)pc))
323                         goto exit;
324                 instr = le16_to_cpu(instr_le);
325                 if (aarch32_insn_is_wide(instr)) {
326                         u32 instr2;
327
328                         if (get_user(instr_le, (__le16 __user *)(pc + 2)))
329                                 goto exit;
330                         instr2 = le16_to_cpu(instr_le);
331                         instr = (instr << 16) | instr2;
332                 }
333         } else {
334                 /* 32-bit ARM instruction */
335                 __le32 instr_le;
336                 if (get_user(instr_le, (__le32 __user *)pc))
337                         goto exit;
338                 instr = le32_to_cpu(instr_le);
339         }
340
341         raw_spin_lock_irqsave(&undef_lock, flags);
342         list_for_each_entry(hook, &undef_hook, node)
343                 if ((instr & hook->instr_mask) == hook->instr_val &&
344                         (regs->pstate & hook->pstate_mask) == hook->pstate_val)
345                         fn = hook->fn;
346
347         raw_spin_unlock_irqrestore(&undef_lock, flags);
348 exit:
349         return fn ? fn(regs, instr) : 1;
350 }
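/*
 * Sketch of how the hook interface above is typically used elsewhere in
 * the kernel (the opcode mask/value below are illustrative, not taken
 * from this file):
 *
 *	static int my_emulate(struct pt_regs *regs, u32 instr);
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0x0fb00ff0,
 *		.instr_val	= 0x01000090,
 *		.pstate_mask	= PSR_AA32_MODE_MASK,
 *		.pstate_val	= PSR_AA32_MODE_USR,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 *
 * call_undef_hook() then invokes ->fn for a matching undefined
 * instruction; returning 0 from the handler suppresses the SIGILL.
 */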
351
352 void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
353 {
354         const char *desc;
355         struct pt_regs *regs = current_pt_regs();
356
357         if (WARN_ON(!user_mode(regs)))
358                 return;
359
360         switch (signal) {
361         case SIGILL:
362                 desc = "undefined instruction";
363                 break;
364         case SIGSEGV:
365                 desc = "illegal memory access";
366                 break;
367         default:
368                 desc = "unknown or unrecoverable error";
369                 break;
370         }
371
372         /* Force signals we don't understand to SIGKILL */
373         if (WARN_ON(signal != SIGKILL &&
374                     siginfo_layout(signal, code) != SIL_FAULT)) {
375                 signal = SIGKILL;
376         }
377
378         arm64_notify_die(desc, regs, signal, code, address, err);
379 }
380
381 /*
382  * Set up process info to signal segmentation fault - called on access error.
383  */
384 void arm64_notify_segfault(unsigned long addr)
385 {
386         int code;
387
388         mmap_read_lock(current->mm);
389         if (find_vma(current->mm, untagged_addr(addr)) == NULL)
390                 code = SEGV_MAPERR;
391         else
392                 code = SEGV_ACCERR;
393         mmap_read_unlock(current->mm);
394
395         force_signal_inject(SIGSEGV, code, addr, 0);
396 }
397
398 void do_undefinstr(struct pt_regs *regs)
399 {
400         /* check for AArch32 breakpoint instructions */
401         if (!aarch32_break_handler(regs))
402                 return;
403
404         if (call_undef_hook(regs) == 0)
405                 return;
406
407         BUG_ON(!user_mode(regs));
408         force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
409 }
410 NOKPROBE_SYMBOL(do_undefinstr);
411
412 void do_bti(struct pt_regs *regs)
413 {
414         BUG_ON(!user_mode(regs));
415         force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
416 }
417 NOKPROBE_SYMBOL(do_bti);
418
419 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
420 {
421         /*
422          * Unexpected FPAC exception or pointer authentication failure in
423          * the kernel: kill the task before it does any more harm.
424          */
425         BUG_ON(!user_mode(regs));
426         force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
427 }
428 NOKPROBE_SYMBOL(do_ptrauth_fault);
429
430 #define __user_cache_maint(insn, address, res)                  \
431         if (address >= user_addr_max()) {                       \
432                 res = -EFAULT;                                  \
433         } else {                                                \
434                 uaccess_ttbr0_enable();                         \
435                 asm volatile (                                  \
436                         "1:     " insn ", %1\n"                 \
437                         "       mov     %w0, #0\n"              \
438                         "2:\n"                                  \
439                         "       .pushsection .fixup,\"ax\"\n"   \
440                         "       .align  2\n"                    \
441                         "3:     mov     %w0, %w2\n"             \
442                         "       b       2b\n"                   \
443                         "       .popsection\n"                  \
444                         _ASM_EXTABLE(1b, 3b)                    \
445                         : "=r" (res)                            \
446                         : "r" (address), "i" (-EFAULT));        \
447                 uaccess_ttbr0_disable();                        \
448         }
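/*
 * In the macro above, a fault on the cache-maintenance instruction at
 * label 1 is redirected via the exception-table entry to label 3 in the
 * .fixup section, which loads -EFAULT into the result register and
 * branches back to label 2, so the caller sees res == -EFAULT instead of
 * taking a kernel fault.
 */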
449
450 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
451 {
452         unsigned long tagged_address, address;
453         int rt = ESR_ELx_SYS64_ISS_RT(esr);
454         int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
455         int ret = 0;
456
457         tagged_address = pt_regs_read_reg(regs, rt);
458         address = untagged_addr(tagged_address);
459
460         switch (crm) {
461         case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
462                 __user_cache_maint("dc civac", address, ret);
463                 break;
464         case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
465                 __user_cache_maint("dc civac", address, ret);
466                 break;
467         case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
468                 __user_cache_maint("sys 3, c7, c13, 1", address, ret);
469                 break;
470         case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
471                 __user_cache_maint("sys 3, c7, c12, 1", address, ret);
472                 break;
473         case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
474                 __user_cache_maint("dc civac", address, ret);
475                 break;
476         case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
477                 __user_cache_maint("ic ivau", address, ret);
478                 break;
479         default:
480                 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
481                 return;
482         }
483
484         if (ret)
485                 arm64_notify_segfault(tagged_address);
486         else
487                 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
488 }
489
490 static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
491 {
492         int rt = ESR_ELx_SYS64_ISS_RT(esr);
493         unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
494
495         if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
496                 /* Hide DIC so that we can trap the unnecessary maintenance... */
497                 val &= ~BIT(CTR_DIC_SHIFT);
498
499                 /* ... and fake IminLine to reduce the number of traps. */
500                 val &= ~CTR_IMINLINE_MASK;
501                 val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
502         }
503
504         pt_regs_write_reg(regs, rt, val);
505
506         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
507 }
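/*
 * CTR_EL0.IminLine is the log2 of the smallest I-cache line size in
 * words, so the value faked above advertises a line size of
 * 4 << (PAGE_SHIFT - 2) == PAGE_SIZE bytes (e.g. 4KiB with 4KiB pages),
 * which minimises the number of trapped cache operations userspace
 * issues per page.
 */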
508
509 static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
510 {
511         int rt = ESR_ELx_SYS64_ISS_RT(esr);
512
513         pt_regs_write_reg(regs, rt, arch_timer_read_counter());
514         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
515 }
516
517 static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
518 {
519         int rt = ESR_ELx_SYS64_ISS_RT(esr);
520
521         pt_regs_write_reg(regs, rt, arch_timer_get_rate());
522         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
523 }
524
525 static void mrs_handler(unsigned int esr, struct pt_regs *regs)
526 {
527         u32 sysreg, rt;
528
529         rt = ESR_ELx_SYS64_ISS_RT(esr);
530         sysreg = esr_sys64_to_sysreg(esr);
531
532         if (do_emulate_mrs(regs, sysreg, rt) != 0)
533                 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
534 }
535
536 static void wfi_handler(unsigned int esr, struct pt_regs *regs)
537 {
538         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
539 }
540
541 struct sys64_hook {
542         unsigned int esr_mask;
543         unsigned int esr_val;
544         void (*handler)(unsigned int esr, struct pt_regs *regs);
545 };
546
547 static const struct sys64_hook sys64_hooks[] = {
548         {
549                 .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
550                 .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
551                 .handler = user_cache_maint_handler,
552         },
553         {
554                 /* Trap read access to CTR_EL0 */
555                 .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
556                 .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
557                 .handler = ctr_read_handler,
558         },
559         {
560                 /* Trap read access to CNTVCT_EL0 */
561                 .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
562                 .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
563                 .handler = cntvct_read_handler,
564         },
565         {
566                 /* Trap read access to CNTFRQ_EL0 */
567                 .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
568                 .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
569                 .handler = cntfrq_read_handler,
570         },
571         {
572                 /* Trap read access to CPUID registers */
573                 .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
574                 .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
575                 .handler = mrs_handler,
576         },
577         {
578                 /* Trap WFI instructions executed in userspace */
579                 .esr_mask = ESR_ELx_WFx_MASK,
580                 .esr_val = ESR_ELx_WFx_WFI_VAL,
581                 .handler = wfi_handler,
582         },
583         {},
584 };
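/*
 * Each entry above matches on the opcode fields of the ESR ISS while
 * leaving the Rt (destination register) field out of the mask, so a
 * single entry covers e.g. "mrs x5, ctr_el0" and "mrs x12, ctr_el0"
 * alike; the handler recovers Rt itself via ESR_ELx_SYS64_ISS_RT().
 */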
585
586 #ifdef CONFIG_COMPAT
587 static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
588 {
589         int cond;
590
591         /* Only a T32 instruction can trap without CV being set */
592         if (!(esr & ESR_ELx_CV)) {
593                 u32 it;
594
595                 it = compat_get_it_state(regs);
596                 if (!it)
597                         return true;
598
599                 cond = it >> 4;
600         } else {
601                 cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
602         }
603
604         return aarch32_opcode_cond_checks[cond](regs->pstate);
605 }
606
607 static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
608 {
609         int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
610
611         pt_regs_write_reg(regs, reg, arch_timer_get_rate());
612         arm64_skip_faulting_instruction(regs, 4);
613 }
614
615 static const struct sys64_hook cp15_32_hooks[] = {
616         {
617                 .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
618                 .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
619                 .handler = compat_cntfrq_read_handler,
620         },
621         {},
622 };
623
624 static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
625 {
626         int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
627         int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
628         u64 val = arch_timer_read_counter();
629
630         pt_regs_write_reg(regs, rt, lower_32_bits(val));
631         pt_regs_write_reg(regs, rt2, upper_32_bits(val));
632         arm64_skip_faulting_instruction(regs, 4);
633 }
634
635 static const struct sys64_hook cp15_64_hooks[] = {
636         {
637                 .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
638                 .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
639                 .handler = compat_cntvct_read_handler,
640         },
641         {},
642 };
643
644 void do_cp15instr(unsigned int esr, struct pt_regs *regs)
645 {
646         const struct sys64_hook *hook, *hook_base;
647
648         if (!cp15_cond_valid(esr, regs)) {
649                 /*
650                  * There is no T16 variant of a CP access, so we
651                  * always advance PC by 4 bytes.
652                  */
653                 arm64_skip_faulting_instruction(regs, 4);
654                 return;
655         }
656
657         switch (ESR_ELx_EC(esr)) {
658         case ESR_ELx_EC_CP15_32:
659                 hook_base = cp15_32_hooks;
660                 break;
661         case ESR_ELx_EC_CP15_64:
662                 hook_base = cp15_64_hooks;
663                 break;
664         default:
665                 do_undefinstr(regs);
666                 return;
667         }
668
669         for (hook = hook_base; hook->handler; hook++)
670                 if ((hook->esr_mask & esr) == hook->esr_val) {
671                         hook->handler(esr, regs);
672                         return;
673                 }
674
675         /*
676          * New cp15 instructions may previously have been undefined at
677          * EL0. Fall back to our usual undefined instruction handler
678          * so that we handle these consistently.
679          */
680         do_undefinstr(regs);
681 }
682 NOKPROBE_SYMBOL(do_cp15instr);
683 #endif
684
685 void do_sysinstr(unsigned int esr, struct pt_regs *regs)
686 {
687         const struct sys64_hook *hook;
688
689         for (hook = sys64_hooks; hook->handler; hook++)
690                 if ((hook->esr_mask & esr) == hook->esr_val) {
691                         hook->handler(esr, regs);
692                         return;
693                 }
694
695         /*
696          * New SYS instructions may previously have been undefined at EL0. Fall
697          * back to our usual undefined instruction handler so that we handle
698          * these consistently.
699          */
700         do_undefinstr(regs);
701 }
702 NOKPROBE_SYMBOL(do_sysinstr);
703
704 static const char *esr_class_str[] = {
705         [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
706         [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
707         [ESR_ELx_EC_WFx]                = "WFI/WFE",
708         [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
709         [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
710         [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
711         [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
712         [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
713         [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
714         [ESR_ELx_EC_PAC]                = "PAC",
715         [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
716         [ESR_ELx_EC_BTI]                = "BTI",
717         [ESR_ELx_EC_ILL]                = "PSTATE.IL",
718         [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
719         [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
720         [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
721         [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
722         [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
723         [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
724         [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
725         [ESR_ELx_EC_SVE]                = "SVE",
726         [ESR_ELx_EC_ERET]               = "ERET/ERETAA/ERETAB",
727         [ESR_ELx_EC_FPAC]               = "FPAC",
728         [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
729         [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
730         [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
731         [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
732         [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
733         [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
734         [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
735         [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
736         [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
737         [ESR_ELx_EC_SERROR]             = "SError",
738         [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
739         [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
740         [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
741         [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
742         [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
743         [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
744         [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
745         [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
746         [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
747 };
748
749 const char *esr_get_class_string(u32 esr)
750 {
751         return esr_class_str[ESR_ELx_EC(esr)];
752 }
753
754 /*
755  * bad_mode handles the impossible case in the exception vector. This is always
756  * fatal.
757  */
758 asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
759 {
760         arm64_enter_nmi(regs);
761
762         console_verbose();
763
764         pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
765                 handler[reason], smp_processor_id(), esr,
766                 esr_get_class_string(esr));
767
768         __show_regs(regs);
769         local_daif_mask();
770         panic("bad mode");
771 }
772
773 /*
774  * bad_el0_sync handles unexpected, but potentially recoverable synchronous
775  * exceptions taken from EL0. Unlike bad_mode, this returns.
776  */
777 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
778 {
779         unsigned long pc = instruction_pointer(regs);
780
781         current->thread.fault_address = 0;
782         current->thread.fault_code = esr;
783
784         arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
785                               "Bad EL0 synchronous exception");
786 }
787
788 #ifdef CONFIG_VMAP_STACK
789
790 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
791         __aligned(16);
792
793 asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
794 {
795         unsigned long tsk_stk = (unsigned long)current->stack;
796         unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
797         unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
798         unsigned int esr = read_sysreg(esr_el1);
799         unsigned long far = read_sysreg(far_el1);
800
801         arm64_enter_nmi(regs);
802
803         console_verbose();
804         pr_emerg("Insufficient stack space to handle exception!");
805
806         pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
807         pr_emerg("FAR: 0x%016lx\n", far);
808
809         pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
810                  tsk_stk, tsk_stk + THREAD_SIZE);
811         pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
812                  irq_stk, irq_stk + IRQ_STACK_SIZE);
813         pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
814                  ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
815
816         __show_regs(regs);
817
818         /*
819          * We use nmi_panic to limit the potential for recursive overflows, and
820          * to get a better stack trace.
821          */
822         nmi_panic(NULL, "kernel stack overflow");
823         cpu_park_loop();
824 }
825 #endif
826
827 void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
828 {
829         console_verbose();
830
831         pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
832                 smp_processor_id(), esr, esr_get_class_string(esr));
833         if (regs)
834                 __show_regs(regs);
835
836         nmi_panic(regs, "Asynchronous SError Interrupt");
837
838         cpu_park_loop();
839         unreachable();
840 }
841
842 bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
843 {
844         u32 aet = arm64_ras_serror_get_severity(esr);
845
846         switch (aet) {
847         case ESR_ELx_AET_CE:    /* corrected error */
848         case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
849                 /*
850                  * The CPU can make progress. We may take UEO again as
851                  * a more severe error.
852                  */
853                 return false;
854
855         case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
856         case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
857                 /*
858                  * The CPU can't make progress. The exception may have
859                  * been imprecise.
860                  *
861                  * Neoverse-N1 #1349291 means a non-KVM SError reported as
862                  * Unrecoverable should be treated as Uncontainable. We
863                  * call arm64_serror_panic() in both cases.
864                  */
865                 return true;
866
867         case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
868         default:
869                 /* Error has been silently propagated */
870                 arm64_serror_panic(regs, esr);
871         }
872 }
873
874 asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
875 {
876         arm64_enter_nmi(regs);
877
878         /* non-RAS errors are not containable */
879         if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
880                 arm64_serror_panic(regs, esr);
881
882         arm64_exit_nmi(regs);
883 }
884
885 /* GENERIC_BUG traps */
886
887 int is_valid_bugaddr(unsigned long addr)
888 {
889         /*
890          * bug_handler() is only called for BRK #BUG_BRK_IMM.
891          * So the answer is trivial -- any spurious instances with no
892          * bug table entry will be rejected by report_bug() and passed
893          * back to the debug-monitors code and handled as a fatal
894          * unexpected debug exception.
895          */
896         return 1;
897 }
898
899 static int bug_handler(struct pt_regs *regs, unsigned int esr)
900 {
901         switch (report_bug(regs->pc, regs)) {
902         case BUG_TRAP_TYPE_BUG:
903                 die("Oops - BUG", regs, 0);
904                 break;
905
906         case BUG_TRAP_TYPE_WARN:
907                 break;
908
909         default:
910                 /* unknown/unrecognised bug trap type */
911                 return DBG_HOOK_ERROR;
912         }
913
914         /* If thread survives, skip over the BUG instruction and continue: */
915         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
916         return DBG_HOOK_HANDLED;
917 }
918
919 static struct break_hook bug_break_hook = {
920         .fn = bug_handler,
921         .imm = BUG_BRK_IMM,
922 };
923
924 static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
925 {
926         pr_err("%s generated an invalid instruction at %pS!\n",
927                 in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
928                 (void *)instruction_pointer(regs));
929
930         /* We cannot handle this */
931         return DBG_HOOK_ERROR;
932 }
933
934 static struct break_hook fault_break_hook = {
935         .fn = reserved_fault_handler,
936         .imm = FAULT_BRK_IMM,
937 };
938
939 #ifdef CONFIG_KASAN_SW_TAGS
940
941 #define KASAN_ESR_RECOVER       0x20
942 #define KASAN_ESR_WRITE         0x10
943 #define KASAN_ESR_SIZE_MASK     0x0f
944 #define KASAN_ESR_SIZE(esr)     (1 << ((esr) & KASAN_ESR_SIZE_MASK))
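/*
 * For example (illustrative value), a BRK comment of 0x33 decodes as
 * RECOVER set (0x20), WRITE set (0x10) and an access size of
 * 1 << 3 == 8 bytes.
 */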
945
946 static int kasan_handler(struct pt_regs *regs, unsigned int esr)
947 {
948         bool recover = esr & KASAN_ESR_RECOVER;
949         bool write = esr & KASAN_ESR_WRITE;
950         size_t size = KASAN_ESR_SIZE(esr);
951         u64 addr = regs->regs[0];
952         u64 pc = regs->pc;
953
954         kasan_report(addr, size, write, pc);
955
956         /*
957          * The instrumentation lets us control whether we can proceed after
958          * a crash is detected. This is done by passing the -recover flag to
959          * the compiler. Disabling recovery allows the compiler to generate
960          * more compact code.
961          *
962          * Unfortunately disabling recovery doesn't work for the kernel right
963          * now. KASAN reporting is disabled in some contexts (for example when
964          * the allocator accesses slab object metadata; this is controlled by
965          * current->kasan_depth). All these accesses are detected by the tool,
966          * even though the reports for them are not printed.
967          *
968          * This is something that might be fixed at some point in the future.
969          */
970         if (!recover)
971                 die("Oops - KASAN", regs, 0);
972
973         /* If thread survives, skip over the brk instruction and continue: */
974         arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
975         return DBG_HOOK_HANDLED;
976 }
977
978 static struct break_hook kasan_break_hook = {
979         .fn     = kasan_handler,
980         .imm    = KASAN_BRK_IMM,
981         .mask   = KASAN_BRK_MASK,
982 };
983 #endif
984
985 /*
986  * Initial handler for AArch64 BRK exceptions.
987  * This handler is only used until debug_traps_init().
988  */
989 int __init early_brk64(unsigned long addr, unsigned int esr,
990                 struct pt_regs *regs)
991 {
992 #ifdef CONFIG_KASAN_SW_TAGS
993         unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
994
995         if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
996                 return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
997 #endif
998         return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
999 }
1000
1001 void __init trap_init(void)
1002 {
1003         register_kernel_break_hook(&bug_break_hook);
1004         register_kernel_break_hook(&fault_break_hook);
1005 #ifdef CONFIG_KASAN_SW_TAGS
1006         register_kernel_break_hook(&kasan_break_hook);
1007 #endif
1008         debug_traps_init();
1009 }