/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr,
		      struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

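/*
 * The low 6 bits of a data/instruction abort ESR hold the fault status
 * code (DFSC/IFSC), which the helper below uses as a direct index into
 * fault_info[]; debug exceptions are dispatched on the ESR EVT field
 * instead.
 */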
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
	return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
{
	return debug_fault_info + DBG_ESR_EVT(esr);
}

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, esr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	return 0;
}
#endif

static void data_abort_decode(unsigned int esr)
{
	pr_alert("Data abort info:\n");

	if (esr & ESR_ELx_ISV) {
		pr_alert("  Access size = %u byte(s)\n",
			 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
		pr_alert("  SSE = %lu, SRT = %lu\n",
			 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
			 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
		pr_alert("  SF = %lu, AR = %lu\n",
			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
	} else {
		pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
	}

	pr_alert("  CM = %lu, WnR = %lu\n",
		 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
		 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}

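/*
 * Decode and print the syndrome information for an unhandled memory
 * abort; this is what produces the "Mem abort info" block in oops
 * output, chaining to data_abort_decode() for data aborts.
 */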
static void mem_abort_decode(unsigned int esr)
{
	pr_alert("Mem abort info:\n");

	pr_alert("  ESR = 0x%08x\n", esr);
	pr_alert("  Exception class = %s, IL = %u bits\n",
		 esr_get_class_string(esr),
		 (esr & ESR_ELx_IL) ? 32 : 16);
	pr_alert("  SET = %lu, FnV = %lu\n",
		 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
		 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
	pr_alert("  EA = %lu, S1PTW = %lu\n",
		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);

	if (esr_is_data_abort(esr))
		data_abort_decode(esr);
}

static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= VA_START;
}

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	pgd_t pgd;

	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 mm == &init_mm ? VA_BITS : (int)vabits_user,
		 (unsigned long)virt_to_phys(mm->pgd));
	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

	do {
		pud_t *pudp, pud;
		pmd_t *pmdp, pmd;
		pte_t *ptep, pte;

		if (pgd_none(pgd) || pgd_bad(pgd))
			break;
		pudp = pud_offset(pgdp, addr);
		pud = READ_ONCE(*pudp);
		pr_cont(", pud=%016llx", pud_val(pud));
		if (pud_none(pud) || pud_bad(pud))
			break;
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		pr_cont(", pmd=%016llx", pmd_val(pmd));
		if (pmd_none(pmd) || pmd_bad(pmd))
			break;
		ptep = pte_offset_map(pmdp, addr);
		pte = READ_ONCE(*ptep);
		pr_cont(", pte=%016llx", pte_val(pte));
		pte_unmap(ptep);
	} while(0);

	pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval, pteval;
	pte_t pte = READ_ONCE(*ptep);

	if (pte_same(pte, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
	 * be set to the most permissive (lowest value) of *ptep and entry
	 * (calculated as: a & b == ~(~a | ~b)).
	 */
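	/*
	 * Illustrative note: with a = PTE_RDONLY bit of *ptep and b =
	 * PTE_RDONLY bit of entry, the XOR/OR/XOR sequence below computes
	 * ~(~a | ~b) == a & b. So if either side is writable (bit clear),
	 * the result is writable; only read-only AND read-only stays
	 * read-only, i.e. the bit can only move towards more permissive.
	 */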
	pte_val(entry) ^= PTE_RDONLY;
	pteval = pte_val(pte);
	do {
		old_pteval = pteval;
		pteval ^= PTE_RDONLY;
		pteval |= pte_val(entry);
		pteval ^= PTE_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}

static bool is_el1_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
					   struct pt_regs *regs)
{
	unsigned int ec       = ESR_ELx_EC(esr);
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
		return false;

	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

static void die_kernel_fault(const char *msg, unsigned long addr,
			     unsigned int esr, struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
		 addr);

	mem_abort_decode(esr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void __do_kernel_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	if (is_el1_permission_fault(addr, esr, regs)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		msg = "paging request";
	}

	die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned int esr)
{
	current->thread.fault_address = address;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
				ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		const struct fault_info *inf = esr_to_fault_info(esr);

		set_thread_esr(addr, esr);
		arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
				      inf->name);
	} else {
		__do_kernel_fault(addr, esr, regs);
	}
}

#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

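/*
 * arm64-private fault codes: VM_FAULT_BADMAP means no VMA covers the
 * address, VM_FAULT_BADACCESS means a VMA exists but its permissions
 * do not allow the access. Both are returned from __do_page_fault()
 * through the same vm_fault_t channel as the generic VM_FAULT_* codes.
 */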
static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static bool is_el0_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

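/*
 * Top-level handler for faults the mm layer may be able to resolve:
 * work out the required VMA permissions, call __do_page_fault() under
 * mmap_sem, and handle retry, accounting, signal delivery and the
 * kernel-mode failure paths.
 */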
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	const struct fault_info *inf;
	struct task_struct *tsk;
	struct mm_struct *mm;
	vm_fault_t fault, major = 0;
	unsigned long vm_flags = VM_READ | VM_WRITE;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, esr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (is_el0_instruction_abort(esr)) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
		if (regs->orig_addr_limit == KERNEL_DS)
			die_kernel_fault("access to user memory with fs=KERNEL_DS",
					 addr, esr, regs);

		if (is_el1_instruction_abort(esr))
			die_kernel_fault("execution of user memory",
					 addr, esr, regs);

		if (!search_exception_tables(regs->pc))
			die_kernel_fault("access to user memory outside uaccess routines",
					 addr, esr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
	major |= fault & VM_FAULT_MAJOR;

	if (fault & VM_FAULT_RETRY) {
		/*
		 * If we need to retry but a fatal signal is pending,
		 * handle the signal first. We do not need to release
		 * the mmap_sem because it would already be released
		 * in __lock_page_or_retry in mm/filemap.c.
		 */
		if (fatal_signal_pending(current)) {
			if (!user_mode(regs))
				goto no_context;
			return 0;
		}

		/*
		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
		 * starvation.
		 */
		if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" (no error) case first.
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS)))) {
		/*
		 * Major/minor page fault accounting is only done
		 * once. If we go through a retry, it is extremely
		 * likely that the page will be found in page cache at
		 * that point.
		 */
		if (major) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}

		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	inf = esr_to_fault_info(esr);
	set_thread_esr(addr, esr);
	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
				      inf->name);
	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
		unsigned int lsb;

		lsb = PAGE_SHIFT;
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

		arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
				       inf->name);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		arm64_force_sig_fault(SIGSEGV,
				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
				      (void __user *)addr,
				      inf->name);
	}

	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}

static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (is_ttbr0_addr(addr))
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
}

static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	const struct fault_info *inf;
	void __user *siaddr;

	inf = esr_to_fault_info(esr);

	/*
	 * Return value ignored as we rely on signal merging.
	 * Future patches will make this more robust.
	 */
	apei_claim_sea(regs);

	if (esr & ESR_ELx_FnV)
		siaddr = NULL;
	else
		siaddr = (void __user *)addr;
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;
}

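/*
 * One entry per possible DFSC/IFSC value (0-63); esr_to_fault_info()
 * indexes this table directly with the fault status code, so the order
 * of the entries must match the architectural encoding.
 */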
static const struct fault_info fault_info[] = {
	{ do_bad,		SIGKILL, SI_KERNEL,	"ttbr address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 1 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 2 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 3 address size fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 8" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 12" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous external abort" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 17" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 18" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 19" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 (translation table walk)" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous parity or ECC error" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 25" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 26" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 27" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 32" },
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 34" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 35" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 36" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 37" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 38" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 39" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 40" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 41" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 42" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 43" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 44" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 45" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 46" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 47" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"TLB conflict abort" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"Unsupported atomic hardware update fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 50" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 51" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  BUS_OBJERR,	"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 54" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 55" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 56" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 57" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 58" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 59" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 60" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"section domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"page domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 63" },
};

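/*
 * Entry point from entry.S for synchronous data and instruction aborts.
 * If the per-code handler reports the fault as unhandled, dump
 * diagnostics for kernel-mode faults and deliver the configured signal.
 */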
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_fault_info(esr);

	if (!inf->fn(addr, esr, regs))
		return;

	if (!user_mode(regs)) {
		pr_alert("Unhandled fault at 0x%016lx\n", addr);
		mem_abort_decode(esr);
		show_pte(addr);
	}

	arm64_notify_die(inf->name, regs,
			 inf->sig, inf->code, (void __user *)addr, esr);
}

asmlinkage void __exception do_el0_irq_bp_hardening(void)
{
	/* PC has already been checked in entry.S */
	arm64_apply_bp_hardening();
}

asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
						   unsigned int esr,
						   struct pt_regs *regs)
{
	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(addr))
		arm64_apply_bp_hardening();

	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(addr, esr, regs);
}

asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	if (user_mode(regs)) {
		if (!is_ttbr0_addr(instruction_pointer(regs)))
			arm64_apply_bp_hardening();
		local_daif_restore(DAIF_PROCCTX);
	}

	arm64_notify_die("SP/PC alignment exception", regs,
			 SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
}

int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 7"		},
};

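/*
 * The do_bad placeholders above are replaced at boot: the debug monitor
 * and hardware breakpoint code install their real handlers through
 * hook_debug_fault_code() (see debug-monitors.c and hw_breakpoint.c).
 */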
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return 0;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return 1;
}
#else
static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
					       unsigned int esr,
					       struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
	unsigned long pc = instruction_pointer(regs);

	if (cortex_a76_erratum_1463225_debug_handler(regs))
		return;

	/*
	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
	 * already disabled to preserve the last enabled/disabled addresses.
	 */
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (inf->fn(addr_if_watchpoint, esr, regs)) {
		arm64_notify_die(inf->name, regs,
				 inf->sig, inf->code, (void __user *)pc, esr);
	}

	if (interrupts_enabled(regs))
		trace_hardirqs_on();
}
NOKPROBE_SYMBOL(do_debug_exception);