/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>
#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/insn-eval.h>
#include <asm/x86_init.h>
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);
__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
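
/*
 * Side note (illustrative sketch, not part of the original file): BUG()
 * and WARN() plant the two-byte UD2 opcode (0x0f 0x0b) at the trap site,
 * which is exactly what the comparison above matches against.  The same
 * check, written stand-alone for a little-endian x86 target, would be:
 *
 *	static bool looks_like_ud2(const void *ip)
 *	{
 *		return *(const unsigned short *)ip == 0x0b0f;	// INSN_UD2
 *	}
 */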
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}
/*
 * POSIX requires that the address of the faulting instruction is provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	/*
	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
	 * is a rare case that uses @regs without passing them to
	 * irqentry_enter().
	 */
	kmsan_unpoison_entry_regs(regs);
	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
	    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}
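
/*
 * Illustration (not part of the original file): a WARN() site compiles to
 * roughly the sequence below; when report_bug() classifies the trap as a
 * warning, the "regs->ip += LEN_UD2" above skips the two-byte UD2, so
 * execution resumes at the next instruction, emulating a return from
 * 'CALL __WARN':
 *
 *	1:	ud2		# trap site, #UD -> handle_bug()
 *		mov ...		# execution resumes here after the fixup
 */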
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}
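
/*
 * Illustration (hypothetical userspace snippet, not from this file): with
 * split lock detection enabled, a locked operation whose operand straddles
 * two cache lines raises the #AC handled above, e.g.:
 *
 *	char buf[128] __attribute__((aligned(64)));
 *	int *p = (int *)(buf + 62);			// crosses a 64-byte line
 *	__atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);	// split lock -> #AC
 */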
#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}
DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
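
/*
 * Worked example for the check above (assuming 4-level paging, where
 * __VIRTUAL_MASK covers bits 0-46): user canonical addresses end at
 * 0x00007fffffffffff and the kernel half begins at 0xffff800000000000.
 * An 8-byte load at 0x00007ffffffffffc starts below __VIRTUAL_MASK, but
 * its last byte lands at 0x0000800000000003, above __VIRTUAL_MASK and
 * still below ~__VIRTUAL_MASK, so the access is flagged GP_NON_CANONICAL.
 */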
#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned long ip;
	char byte;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	/* Only CLI (0xfa) and STI (0xfb) are emulated. */
	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}
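
/*
 * Usage illustration (hypothetical userspace program, not from this file):
 * with CONFIG_X86_IOPL_IOPERM the kernel only emulates IOPL, so a CLI
 * executed after iopl(3) raises #GP and is turned into a NOP by the fixup
 * above instead of killing the task:
 *
 *	#include <sys/io.h>
 *
 *	int main(void)
 *	{
 *		iopl(3);		// kernel sets t->iopl_emul = 3
 *		asm volatile("cli");	// #GP -> fixup_iopl_exception()
 *		return 0;
 *	}
 */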
/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_IOMMU_SVA
	u32 pasid;

	/*
	 * MSR_IA32_PASID is managed using XSAVE.  Directly
	 * writing to the MSR is only possible when fpregs
	 * are valid and the fpstate is not.  This is
	 * guaranteed when handling a userspace exception
	 * *before* interrupts are re-enabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * Hardware without ENQCMD will not generate
	 * #GPs that can be fixed up here.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		return false;

	/*
	 * If the mm has not been allocated a
	 * PASID, the #GP can not be fixed up.
	 */
	if (!mm_valid_pasid(current->mm))
		return false;

	pasid = current->mm->pasid;

	/*
	 * Did this thread already have its PASID activated?
	 * If so, the #GP must be from something else.
	 */
	if (current->pasid_activated)
		return false;

	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
	current->pasid_activated = 1;

	return true;
#else
	return false;
#endif
}
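
/*
 * Layout note (an assumption, for illustration only): the value written
 * above is the PASID in the low bits with the valid bit set:
 *
 *	MSR_IA32_PASID = pasid | MSR_IA32_PASID_VALID;	// valid bit = BIT(31)
 */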
static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
				    unsigned long error_code, const char *str,
				    unsigned long address)
{
	if (fixup_exception(regs, trapnr, error_code, address))
		return true;

	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, trapnr))
		return true;

	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
				   unsigned long error_code, const char *str)
{
	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;
	show_signal(current, SIGSEGV, "", str, regs, error_code);
	force_sig(SIGSEGV);
}
DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	unsigned long gp_addr;

	if (user_mode(regs) && try_fixup_enqcmd_gp())
		return;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
		goto exit;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);
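
/*
 * Background note (illustrative): INT3 is the single byte 0xcc, which is
 * why kprobes and the text poking machinery can arm a breakpoint by
 * rewriting just the first byte of any instruction, conceptually:
 *
 *	*(u8 *)addr = 0xcc;	// in practice done via text_poke_bp()
 */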
static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}
#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(pcpu_hot.top_of_stack) - 1;

	if (regs != eregs)
		*regs = *eregs;

	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = this_cpu_read(pcpu_hot.top_of_stack);
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. Not safe are the entry or unknown stacks,
	 * use the fall-back stack instead in this case.
	 */
	sp    = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * IST stack. The code below only copies pt_regs, the real switch happens
	 * in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif
asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
	struct pt_regs tmp, *new_stack;

	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	/* BUG if the frame we are about to return to is not a user frame. */
	BUG_ON(!user_mode(new_stack));

	return new_stack;
}
#endif
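
/*
 * Illustration of the copy above (hardware IRET frame layout; the five
 * quadwords at bad_regs->sp map onto the tail of struct pt_regs):
 *
 *	bad_regs->sp -> [ RIP    ]  -> tmp.ip
 *			[ CS     ]  -> tmp.cs
 *			[ RFLAGS ]  -> tmp.flags
 *			[ RSP    ]  -> tmp.sp
 *			[ SS     ]  -> tmp.ss		(5*8 bytes total)
 */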
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
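
/*
 * Side note (illustrative): the comparisons above use the classic unsigned
 * range-check idiom.  For lo <= ip < hi, a single compare suffices because
 * an ip below lo wraps around to a huge unsigned value:
 *
 *	static bool in_range(unsigned long ip,
 *			     unsigned long lo, unsigned long hi)
 *	{
 *		return ip - lo < hi - lo;	// one branch instead of two
 *	}
 */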
static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}
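
/*
 * Worked example of the polarity flip above: DR6_RESERVED holds the bits
 * that read as 1 when nothing is asserted, including active-low event bits
 * such as DR6.RTM.  After "dr6 ^= DR6_RESERVED" every bit in dr6 is 1 if
 * and only if its event fired, so the later "!dr6" checks genuinely mean
 * "no debug event pending".
 */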
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area (which
	 * includes the entry stack) is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;

out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Synchronize the FPU register state to the memory register state
	 * if necessary. This allows the exception handler to inspect it.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}
DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}
static bool handle_xfd_event(struct pt_regs *regs)
{
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	wrmsrl(MSR_IA32_XFD_ERR, 0);

	/* Die if that happens in kernel space */
	if (WARN_ON(!user_mode(regs)))
		return false;

	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
		break;
	case -EFAULT:
		force_sig(SIGSEGV);
		break;
	}

	local_irq_disable();
	return true;
}
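
/*
 * Usage illustration (hypothetical userspace sequence, not from this file):
 * dynamically-enabled features such as AMX must be requested first; the
 * first tile instruction afterwards takes #NM with XFD_ERR set, and the
 * handler above grows the fpstate via xfd_enable_feature():
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define XFEATURE_XTILEDATA	18
 *
 *	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
 *	// the first AMX instruction now faults -> handle_xfd_event()
 */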
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}
#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code,
			   unsigned long address)
{
	if (user_mode(regs)) {
		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
		return;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
				    VE_FAULT_STR, address)) {
		return;
	}

	die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up. Any exception in this window leads to hard to debug issues
 * and can be exploited for privilege escalation. Exceptions in the NMI
 * entry code also cause issues. Returning from the exception handler with
 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code. Entry code paths do not access TD-shared memory,
 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
 * that might generate #VE. VMM can remove memory from TD at any point,
 * but access to unaccepted (or missing) private memory leads to VM
 * termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called. It prevents #VE nesting until the kernel reads
 * the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (double fault)
 * exception is delivered to the guest, which will result in an oops.
 *
 * The entry code has been audited carefully to follow these expectations.
 * Changes in the entry code have to be audited for correctness vs. this
 * aspect. Similarly to #PF, #VE in these places will expose kernel to
 * privilege escalation or may lead to random crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
	struct ve_info ve;

	/*
	 * NMIs/Machine-checks/Interrupts will be in a disabled state
	 * till TDGETVEINFO TDCALL is executed. This ensures that VE
	 * info cannot be overwritten by a nested #VE.
	 */
	tdx_get_ve_info(&ve);

	cond_local_irq_enable(regs);

	/*
	 * If tdx_handle_virt_exception() could not process
	 * it successfully, treat it as #GP(0) and handle it.
	 */
	if (!tdx_handle_virt_exception(regs, &ve))
		ve_raise_fault(regs, 0, ve.gla);

	cond_local_irq_disable(regs);
}

#endif
#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	/* Initialize TSS before setting up traps so ISTs work */
	cpu_init_exception_handling();
	/* Setup traps as cpu_init() might #GP */
	idt_setup_traps();

	cpu_init();
}