1 /* SPDX-License-Identifier: GPL-2.0 */
3 * linux/arch/x86_64/entry.S
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
11 * Some of this is documented in Documentation/x86/entry_64.rst
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - TRACE_IRQ_*: Trace hardirq state for lock debugging.
20 * - idtentry: Define exception entry points.
22 #include <linux/linkage.h>
23 #include <asm/segment.h>
24 #include <asm/cache.h>
25 #include <asm/errno.h>
26 #include <asm/asm-offsets.h>
28 #include <asm/unistd.h>
29 #include <asm/thread_info.h>
30 #include <asm/hw_irq.h>
31 #include <asm/page_types.h>
32 #include <asm/irqflags.h>
33 #include <asm/paravirt.h>
34 #include <asm/percpu.h>
37 #include <asm/pgtable_types.h>
38 #include <asm/export.h>
39 #include <asm/frame.h>
40 #include <asm/trapnr.h>
41 #include <asm/nospec-branch.h>
42 #include <linux/err.h>
47 .section .entry.text, "ax"
49 #ifdef CONFIG_PARAVIRT
50 SYM_CODE_START(native_usergs_sysret64)
54 SYM_CODE_END(native_usergs_sysret64)
55 #endif /* CONFIG_PARAVIRT */
57 .macro TRACE_IRQS_FLAGS flags:req
58 #ifdef CONFIG_TRACE_IRQFLAGS
59 btl $9, \flags /* interrupts off? */
66 .macro TRACE_IRQS_IRETQ
67 TRACE_IRQS_FLAGS EFLAGS(%rsp)
71 * When dynamic function tracer is enabled it will add a breakpoint
72 * to all locations that it is about to modify, sync CPUs, update
73 * all the code, sync CPUs, then remove the breakpoints. In this time
74 * if lockdep is enabled, it might jump back into the debug handler
75 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
77 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
78 * make sure the stack pointer does not get reset back to the top
79 * of the debug stack, and instead just reuses the current stack.
81 #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
83 .macro TRACE_IRQS_OFF_DEBUG
84 call debug_stack_set_zero
86 call debug_stack_reset
89 .macro TRACE_IRQS_ON_DEBUG
90 call debug_stack_set_zero
92 call debug_stack_reset
95 .macro TRACE_IRQS_IRETQ_DEBUG
96 btl $9, EFLAGS(%rsp) /* interrupts off? */
103 # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF
104 # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON
105 # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
109 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
111 * This is the only entry point used for 64-bit system calls. The
112 * hardware interface is reasonably well designed and the register to
113 * argument mapping Linux uses fits well with the registers that are
114 * available when SYSCALL is used.
116 * SYSCALL instructions can be found inlined in libc implementations as
117 * well as some other programs and libraries. There are also a handful
118 * of SYSCALL instructions in the vDSO used, for example, as a
119 * clock_gettimeofday fallback.
121 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
122 * then loads new ss, cs, and rip from previously programmed MSRs.
123 * rflags gets masked by a value from another MSR (so CLD and CLAC
124 * are not needed). SYSCALL does not save anything on the stack
125 * and does not change rsp.
127 * Registers on entry:
128 * rax system call number
130 * r11 saved rflags (note: r11 is a callee-clobbered register in the C ABI)
134 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
137 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
139 * Only called from user space.
141 * When user can change pt_regs->foo always force IRET. That is because
142 * it deals with non-canonical addresses better. SYSRET has trouble
143 * with them due to bugs in both AMD and Intel CPUs.
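/*
 * For illustration only: a user-space sketch (not kernel code) of the
 * register convention described above, issuing write(2) directly via
 * SYSCALL. rcx and r11 are listed as clobbers because the instruction
 * overwrites them with the return RIP and RFLAGS, as noted above.
 *
 *	static long raw_write(int fd, const void *buf, unsigned long count)
 *	{
 *		long ret;
 *		asm volatile("syscall"
 *			     : "=a" (ret)
 *			     : "0" (1L),			// __NR_write
 *			       "D" ((long)fd), "S" (buf), "d" (count)
 *			     : "rcx", "r11", "memory");
 *		return ret;
 *	}
 */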
146 SYM_CODE_START(entry_SYSCALL_64)
149 * Interrupts are off on entry.
150 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
151 * it is too small to ever cause noticeable irq latency.
155 /* tss.sp2 is scratch space. */
156 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
157 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
158 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
160 /* Construct struct pt_regs on stack */
161 pushq $__USER_DS /* pt_regs->ss */
162 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
163 pushq %r11 /* pt_regs->flags */
164 pushq $__USER_CS /* pt_regs->cs */
165 pushq %rcx /* pt_regs->ip */
166 SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
167 pushq %rax /* pt_regs->orig_ax */
169 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
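/*
 * For illustration only: the frame built by the pushes above plus
 * PUSH_AND_CLEAR_REGS matches struct pt_regs, roughly (from
 * <asm/ptrace.h>, lowest address first):
 *
 *	struct pt_regs {
 *		unsigned long r15, r14, r13, r12, bp, bx;
 *		unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
 *		unsigned long orig_ax;			// here: the syscall number
 *		unsigned long ip, cs, flags, sp, ss;	// the "iret frame"
 *	};
 *
 * The pushes run top-down: ss first, r15 last, so %rsp ends up pointing
 * at r15, i.e. at the start of pt_regs.
 */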
174 call do_syscall_64 /* returns with IRQs disabled */
177 * Try to use SYSRET instead of IRET if we're returning to
178 * a completely clean 64-bit userspace context. If we're not,
179 * go to the slow exit path.
184 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */
185 jne swapgs_restore_regs_and_return_to_usermode
188 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
189 * in kernel space. This essentially lets the user take over
190 * the kernel, since userspace controls RSP.
192 * If width of "canonical tail" ever becomes variable, this will need
193 * to be updated to remain correct on both old and new CPUs.
195 * Change top bits to match most significant bit (47th or 56th bit
196 * depending on paging mode) in the address.
198 #ifdef CONFIG_X86_5LEVEL
199 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
200 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
202 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
203 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
206 /* If this changed %rcx, it was not canonical */
208 jne swapgs_restore_regs_and_return_to_usermode
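/*
 * For illustration only: the shl/sar pair above is a sign-extension
 * based canonicality test. Assuming 48-bit virtual addresses
 * (__VIRTUAL_MASK_SHIFT == 47), it is roughly:
 *
 *	static inline bool rip_is_canonical(unsigned long rip)
 *	{
 *		return ((long)(rip << 16) >> 16) == (long)rip;	// sign-extend from bit 47
 *	}
 *
 * e.g. 0x00007fffffffffff and 0xffff800000000000 pass, while
 * 0x0000800000000000 does not.
 */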
210 cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
211 jne swapgs_restore_regs_and_return_to_usermode
214 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */
215 jne swapgs_restore_regs_and_return_to_usermode
218 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
219 * restore RF properly. If the slowpath sets it for whatever reason, we
220 * need to restore it correctly.
222 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
223 * trap from userspace immediately after SYSRET. This would cause an
224 * infinite loop whenever #DB happens with register state that satisfies
225 * the opportunistic SYSRET conditions. For example, single-stepping this user code:
228 * movq $stuck_here, %rcx
233 * would never get past 'stuck_here'.
235 testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
236 jnz swapgs_restore_regs_and_return_to_usermode
238 /* nothing to check for RSP */
240 cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
241 jne swapgs_restore_regs_and_return_to_usermode
244 * We win! This label is here just for ease of understanding
245 * perf profiles. Nothing jumps here.
247 syscall_return_via_sysret:
248 /* rcx and r11 are already restored (see code above) */
249 POP_REGS pop_rdi=0 skip_r11rcx=1
252 * Now all regs are restored except RSP and RDI.
253 * Save old stack pointer and switch to trampoline stack.
256 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
259 pushq RSP-RDI(%rdi) /* RSP */
260 pushq (%rdi) /* RDI */
263 * We are on the trampoline stack. All regs except RDI are live.
264 * We can do future final exit work right here.
266 STACKLEAK_ERASE_NOCLOBBER
268 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
273 SYM_CODE_END(entry_SYSCALL_64)
279 .pushsection .text, "ax"
280 SYM_FUNC_START(__switch_to_asm)
282 * Save callee-saved registers
283 * This must match the order in inactive_task_frame
293 movq %rsp, TASK_threadsp(%rdi)
294 movq TASK_threadsp(%rsi), %rsp
296 #ifdef CONFIG_STACKPROTECTOR
297 movq TASK_stack_canary(%rsi), %rbx
298 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
301 #ifdef CONFIG_RETPOLINE
303 * When switching from a shallower to a deeper call stack
304 * the RSB may either underflow or use entries populated
305 * with userspace addresses. On CPUs where those concerns
306 * exist, overwrite the RSB with entries which capture
307 * speculative execution to prevent attack.
309 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
312 /* restore callee-saved registers */
321 SYM_FUNC_END(__switch_to_asm)
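/*
 * For illustration only: the C side reaches this through switch_to();
 * the declaration is roughly (see <asm/switch_to.h>):
 *
 *	struct task_struct *__switch_to_asm(struct task_struct *prev,
 *					    struct task_struct *next);
 *
 * %rdi/%rsi carry prev/next, and the tail call into __switch_to()
 * returns the previous task in %rax (which is why ret_from_fork below
 * documents "rax: prev task we switched from").
 */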
325 * A newly forked process directly context switches into this address.
327 * rax: prev task we switched from
328 * rbx: kernel thread func (NULL for user thread)
329 * r12: kernel thread arg
331 .pushsection .text, "ax"
332 SYM_CODE_START(ret_from_fork)
335 call schedule_tail /* rdi: 'prev' task parameter */
337 testq %rbx, %rbx /* from kernel_thread? */
338 jnz 1f /* kernel threads are uncommon */
343 call syscall_return_slowpath /* returns with IRQs disabled */
344 jmp swapgs_restore_regs_and_return_to_usermode
352 * A kernel thread is allowed to return here after successfully
353 * calling do_execve(). Exit to userspace to complete the execve() syscall.
358 SYM_CODE_END(ret_from_fork)
361 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
362 #ifdef CONFIG_DEBUG_ENTRY
365 testl $X86_EFLAGS_IF, %eax
374 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
375 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
376 * Requires kernel GSBASE.
378 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
380 .macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
381 DEBUG_ENTRY_ASSERT_IRQS_OFF
385 * If save_ret is set, the original stack contains one additional
386 * entry -- the return address. Therefore, move the address one
387 * entry below %rsp to \old_rsp.
389 leaq 8(%rsp), \old_rsp
395 UNWIND_HINT_REGS base=\old_rsp
398 incl PER_CPU_VAR(irq_count)
399 jnz .Lirq_stack_push_old_rsp_\@
402 * Right now, if we just incremented irq_count to zero, we've
403 * claimed the IRQ stack but we haven't switched to it yet.
405 * If anything is added that can interrupt us here without using IST,
406 * it must be *extremely* careful to limit its stack usage. This
407 * could include kprobes and a hypothetical future IST-less #DB
410 * The OOPS unwinder relies on the word at the top of the IRQ
411 * stack linking back to the previous RSP for the entire time we're
412 * on the IRQ stack. For this to work reliably, we need to write
413 * it before we actually move ourselves to the IRQ stack.
416 movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
417 movq PER_CPU_VAR(hardirq_stack_ptr), %rsp
419 #ifdef CONFIG_DEBUG_ENTRY
421 * If the first movq above becomes wrong due to IRQ stack layout
422 * changes, the only way we'll notice is if we try to unwind right
423 * here. Assert that we set up the stack right to catch this type of bug quickly.
426 cmpq -8(%rsp), \old_rsp
427 je .Lirq_stack_okay\@
432 .Lirq_stack_push_old_rsp_\@:
436 UNWIND_HINT_REGS indirect=1
441 * Push the return address to the stack. This return address can
442 * be found at the "real" original RSP, which was offset by 8 at
443 * the beginning of this macro.
450 * Undoes ENTER_IRQ_STACK.
452 .macro LEAVE_IRQ_STACK regs=1
453 DEBUG_ENTRY_ASSERT_IRQS_OFF
454 /* We need to be off the IRQ stack before decrementing irq_count. */
462 * As in ENTER_IRQ_STACK, while irq_count == 0 we are still claiming
463 * the IRQ stack even though we're no longer on it.
466 decl PER_CPU_VAR(irq_count)
470 * idtentry_body - Macro to emit code calling the C function
471 * @cfunc: C function to be called
472 * @has_error_code: Hardware pushed error code on stack
474 .macro idtentry_body cfunc has_error_code:req
479 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
481 .if \has_error_code == 1
482 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
483 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
492 * idtentry - Macro to generate entry stubs for simple IDT entries
493 * @vector: Vector number
494 * @asmsym: ASM symbol for the entry point
495 * @cfunc: C function to be called
496 * @has_error_code: Hardware pushed error code on stack
498 * The macro emits code to set up the kernel context for straightforward
499 * and simple IDT entries. No IST stack, no paranoid entry checks.
501 .macro idtentry vector asmsym cfunc has_error_code:req
502 SYM_CODE_START(\asmsym)
503 UNWIND_HINT_IRET_REGS offset=\has_error_code*8
506 .if \has_error_code == 0
507 pushq $-1 /* ORIG_RAX: no syscall to restart */
510 .if \vector == X86_TRAP_BP
512 * If coming from kernel space, create a 6-word gap to allow the
513 * int3 handler to emulate a call instruction.
515 testb $3, CS-ORIG_RAX(%rsp)
516 jnz .Lfrom_usermode_no_gap_\@
520 UNWIND_HINT_IRET_REGS offset=8
521 .Lfrom_usermode_no_gap_\@:
524 idtentry_body \cfunc \has_error_code
526 _ASM_NOKPROBE(\asmsym)
527 SYM_CODE_END(\asmsym)
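/*
 * For illustration only: <asm/idtentry.h> expands its DECLARE_IDTENTRY*()
 * macros into invocations of the macro above, along the lines of:
 *
 *	idtentry X86_TRAP_DE  asm_exc_divide_error       exc_divide_error       has_error_code=0
 *	idtentry X86_TRAP_GP  asm_exc_general_protection exc_general_protection has_error_code=1
 */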
531 * MCE and DB exceptions
533 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
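/*
 * For illustration only: CPU_TSS_IST(x) is the per-CPU address of the
 * x-th IST slot in the hardware TSS, i.e. roughly the C expression
 *
 *	this_cpu_ptr(&cpu_tss_rw)->x86_tss.ist[x]
 *
 * which is what idtentry_mce_db below temporarily shifts by
 * DB_STACK_OFFSET for #DB so a recursive #DB lands on a second stack.
 */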
536 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
537 * @vector: Vector number
538 * @asmsym: ASM symbol for the entry point
539 * @cfunc: C function to be called
541 * The macro emits code to set up the kernel context for #MC and #DB
543 * If the entry comes from user space it uses the normal entry path
544 * including the return to user space work and preemption checks on exit.
547 * If it hits in kernel mode then it needs to go through the paranoid
548 * entry as the exception can hit any random state. No preemption
549 * check on exit to keep the paranoid path simple.
551 * If the trap is #DB then the interrupt stack entry in the IST is
552 * moved to the second stack, so a potential recursion will have a fresh IST.
555 .macro idtentry_mce_db vector asmsym cfunc
556 SYM_CODE_START(\asmsym)
557 UNWIND_HINT_IRET_REGS
560 pushq $-1 /* ORIG_RAX: no syscall to restart */
563 * If the entry is from userspace, switch stacks and treat it as
566 testb $3, CS-ORIG_RAX(%rsp)
567 jnz .Lfrom_usermode_switch_stack_\@
570 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
571 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
577 .if \vector == X86_TRAP_DB
583 movq %rsp, %rdi /* pt_regs pointer */
585 .if \vector == X86_TRAP_DB
586 subq $DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
591 .if \vector == X86_TRAP_DB
592 addq $DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
597 /* Switch to the regular task stack and use the noist entry point */
598 .Lfrom_usermode_switch_stack_\@:
599 idtentry_body noist_\cfunc, has_error_code=0
601 _ASM_NOKPROBE(\asmsym)
602 SYM_CODE_END(\asmsym)
606 * Double fault entry. Straight paranoid. No checks from which context
607 * this comes because for the espfix induced #DF this would do the wrong thing.
610 .macro idtentry_df vector asmsym cfunc
611 SYM_CODE_START(\asmsym)
612 UNWIND_HINT_IRET_REGS offset=8
616 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
617 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
622 movq %rsp, %rdi /* pt_regs pointer into first argument */
623 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
624 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
629 _ASM_NOKPROBE(\asmsym)
630 SYM_CODE_END(\asmsym)
634 * Include the defines which emit the idt entries which are shared
635 * between 32 and 64 bit.
637 #include <asm/idtentry.h>
640 * Interrupt entry helper function.
642 * Entry runs with interrupts off. Stack layout at entry:
643 * +----------------------------------------------------+
644 * | regs->ss                                            |
645 * | regs->rsp                                           |
646 * | regs->eflags                                        |
647 * | regs->cs                                            |
648 * | regs->ip                                            |
649 * +----------------------------------------------------+
650 * | regs->orig_ax = ~(interrupt number)                 |
651 * +----------------------------------------------------+
652 * | return address                                      |
653 * +----------------------------------------------------+
655 SYM_CODE_START(interrupt_entry)
656 UNWIND_HINT_IRET_REGS offset=16
660 testb $3, CS-ORIG_RAX+8(%rsp)
663 FENCE_SWAPGS_USER_ENTRY
665 * Switch to the thread stack. The IRET frame and orig_ax are
666 * on the stack, as well as the return address. RDI..R12 are
667 * not (yet) on the stack and space has not (yet) been
668 * allocated for them.
672 /* Need to switch before accessing the thread stack. */
673 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
675 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
678 * We have RDI, return address, and orig_ax on the stack on
679 * top of the IRET frame. That means offset=24
681 UNWIND_HINT_IRET_REGS base=%rdi offset=24
683 pushq 7*8(%rdi) /* regs->ss */
684 pushq 6*8(%rdi) /* regs->rsp */
685 pushq 5*8(%rdi) /* regs->eflags */
686 pushq 4*8(%rdi) /* regs->cs */
687 pushq 3*8(%rdi) /* regs->ip */
688 UNWIND_HINT_IRET_REGS
689 pushq 2*8(%rdi) /* regs->orig_ax */
690 pushq 8(%rdi) /* return address */
695 FENCE_SWAPGS_KERNEL_ENTRY
697 PUSH_AND_CLEAR_REGS save_ret=1
698 ENCODE_FRAME_POINTER 8
704 * IRQ from user mode.
706 * We need to tell lockdep that IRQs are off. We can't do this until
707 * we fix gsbase, and we should do it before enter_from_user_mode
708 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
709 * the simplest way to handle it is to just call it twice if
710 * we enter from user mode. There's no reason to optimize this since
711 * TRACE_IRQS_OFF is a no-op if lockdep is off.
715 CALL_enter_from_user_mode
718 ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
719 /* We entered an interrupt context - irqs are off: */
723 SYM_CODE_END(interrupt_entry)
724 _ASM_NOKPROBE(interrupt_entry)
727 /* Interrupt entry/exit. */
730 * The interrupt stubs push vector onto the stack and
731 * then jump to common_spurious/interrupt.
733 SYM_CODE_START_LOCAL(common_spurious)
735 UNWIND_HINT_REGS indirect=1
736 movq ORIG_RAX(%rdi), %rsi /* get vector from stack */
737 movq $-1, ORIG_RAX(%rdi) /* no syscall to restart */
738 call smp_spurious_interrupt /* rdi points to pt_regs */
740 SYM_CODE_END(common_spurious)
741 _ASM_NOKPROBE(common_spurious)
743 /* common_interrupt is a hotpath. Align it */
744 .p2align CONFIG_X86_L1_CACHE_SHIFT
745 SYM_CODE_START_LOCAL(common_interrupt)
747 UNWIND_HINT_REGS indirect=1
748 movq ORIG_RAX(%rdi), %rsi /* get vector from stack */
749 movq $-1, ORIG_RAX(%rdi) /* no syscall to restart */
750 call do_IRQ /* rdi points to pt_regs */
751 /* 0(%rsp): old RSP */
753 DISABLE_INTERRUPTS(CLBR_ANY)
761 /* Interrupt came from user space */
764 call prepare_exit_to_usermode
766 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
767 #ifdef CONFIG_DEBUG_ENTRY
768 /* Assert that pt_regs indicates user mode. */
777 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
778 * Save old stack pointer and switch to trampoline stack.
781 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
784 /* Copy the IRET frame to the trampoline stack. */
785 pushq 6*8(%rdi) /* SS */
786 pushq 5*8(%rdi) /* RSP */
787 pushq 4*8(%rdi) /* EFLAGS */
788 pushq 3*8(%rdi) /* CS */
789 pushq 2*8(%rdi) /* RIP */
791 /* Push user RDI on the trampoline stack. */
795 * We are on the trampoline stack. All regs except RDI are live.
796 * We can do future final exit work right here.
798 STACKLEAK_ERASE_NOCLOBBER
800 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
808 /* Returning to kernel space */
810 #ifdef CONFIG_PREEMPTION
811 /* Interrupts are off */
812 /* Check if we need preemption */
813 btl $9, EFLAGS(%rsp) /* were interrupts off? */
815 cmpl $0, PER_CPU_VAR(__preempt_count)
817 call preempt_schedule_irq
821 * The iretq could re-enable interrupts:
825 SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
826 #ifdef CONFIG_DEBUG_ENTRY
827 /* Assert that pt_regs indicates kernel mode. */
834 addq $8, %rsp /* skip regs->orig_ax */
836 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
837 * when returning from an IPI handler.
841 SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
842 UNWIND_HINT_IRET_REGS
844 * Are we returning to a stack segment from the LDT? Note: in
845 * 64-bit mode SS:RSP on the exception stack is always valid.
847 #ifdef CONFIG_X86_ESPFIX64
848 testb $4, (SS-RIP)(%rsp)
849 jnz native_irq_return_ldt
852 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
854 * This may fault. Non-paranoid faults on return to userspace are
855 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
856 * Double-faults due to espfix64 are handled in exc_double_fault.
857 * Other faults here are fatal.
861 #ifdef CONFIG_X86_ESPFIX64
862 native_irq_return_ldt:
864 * We are running with user GSBASE. All GPRs contain their user
865 * values. We have a percpu ESPFIX stack that is eight slots
866 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
867 * of the ESPFIX stack.
869 * We clobber RAX and RDI in this code. We stash RDI on the
870 * normal stack and RAX on the ESPFIX stack.
872 * The ESPFIX stack layout we set up looks like this:
874 * --- top of ESPFIX stack ---
879 * RIP <-- RSP points here when we're done
880 * RAX <-- espfix_waddr points here
881 * --- bottom of ESPFIX stack ---
884 pushq %rdi /* Stash user RDI */
885 SWAPGS /* to kernel GS */
886 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
888 movq PER_CPU_VAR(espfix_waddr), %rdi
889 movq %rax, (0*8)(%rdi) /* user RAX */
890 movq (1*8)(%rsp), %rax /* user RIP */
891 movq %rax, (1*8)(%rdi)
892 movq (2*8)(%rsp), %rax /* user CS */
893 movq %rax, (2*8)(%rdi)
894 movq (3*8)(%rsp), %rax /* user RFLAGS */
895 movq %rax, (3*8)(%rdi)
896 movq (5*8)(%rsp), %rax /* user SS */
897 movq %rax, (5*8)(%rdi)
898 movq (4*8)(%rsp), %rax /* user RSP */
899 movq %rax, (4*8)(%rdi)
900 /* Now RAX == RSP. */
902 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
905 * espfix_stack[31:16] == 0. The page tables are set up such that
906 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
907 * espfix_waddr for any X. That is, there are 65536 RO aliases of
908 * the same page. Set up RSP so that RSP[31:16] contains the
909 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
910 * still points to an RO alias of the ESPFIX stack.
912 orq PER_CPU_VAR(espfix_stack), %rax
914 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
915 SWAPGS /* to user GS */
916 popq %rdi /* Restore user RDI */
919 UNWIND_HINT_IRET_REGS offset=8
922 * At this point, we cannot write to the stack any more, but we can still read.
925 popq %rax /* Restore user RAX */
928 * RSP now points to an ordinary IRET frame, except that the page
929 * is read-only and RSP[31:16] are preloaded with the userspace
930 * values. We can now IRET back to userspace.
932 jmp native_irq_return_iret
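/*
 * For illustration only: the RSP fixup above is, in C terms, roughly
 *
 *	new_rsp = espfix_stack | (user_rsp & 0xffff0000UL);
 *
 * e.g. with a user RSP of 0x00007f1234565432, the andl leaves
 * 0x34560000 in %rax, so the resulting RSP carries 0x3456 in bits
 * 31:16 -- the same bits the user RSP had -- while still pointing into
 * the read-only alias of the ESPFIX stack described above.
 */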
934 SYM_CODE_END(common_interrupt)
935 _ASM_NOKPROBE(common_interrupt)
940 .macro apicinterrupt3 num sym do_sym
942 UNWIND_HINT_IRET_REGS
945 UNWIND_HINT_REGS indirect=1
946 call \do_sym /* rdi points to pt_regs */
952 /* Make sure APIC interrupt handlers end up in the irqentry section: */
953 #define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
954 #define POP_SECTION_IRQENTRY .popsection
956 .macro apicinterrupt num sym do_sym
957 PUSH_SECTION_IRQENTRY
958 apicinterrupt3 \num \sym \do_sym
963 apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
964 apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
968 apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt
971 apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt
972 apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
974 #ifdef CONFIG_HAVE_KVM
975 apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
976 apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
977 apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
980 #ifdef CONFIG_X86_MCE_THRESHOLD
981 apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt
984 #ifdef CONFIG_X86_MCE_AMD
985 apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt
988 #ifdef CONFIG_X86_THERMAL_VECTOR
989 apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt
993 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt
994 apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt
995 apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt
998 apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt
999 apicinterrupt SPURIOUS_APIC_VECTOR spurious_apic_interrupt smp_spurious_apic_interrupt
1001 #ifdef CONFIG_IRQ_WORK
1002 apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
1006 * Reload gs selector with exception handling
1009 * Is in entry.text as it shouldn't be instrumented.
1011 SYM_FUNC_START(asm_load_gs_index)
1016 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
1020 SYM_FUNC_END(asm_load_gs_index)
1021 EXPORT_SYMBOL(asm_load_gs_index)
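/*
 * For illustration only: C callers do not use this directly; assuming
 * the usual wrapper (roughly as in <asm/special_insns.h>):
 *
 *	static inline void native_load_gs_index(unsigned int selector)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		asm_load_gs_index(selector);
 *		local_irq_restore(flags);
 *	}
 *
 * so the swapgs pair inside asm_load_gs_index() cannot be interrupted
 * while the user GS base is live in kernel mode.
 */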
1023 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
1024 .section .fixup, "ax"
1025 /* running with kernelgs */
1026 SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
1027 swapgs /* switch back to user gs */
1029 /* This can't be a string because the preprocessor needs to see it. */
1030 movl $__USER_DS, %eax
1033 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
1037 SYM_CODE_END(.Lbad_gs)
1041 * rdi: New stack pointer points to the top word of the stack
1042 * rsi: Function pointer
1043 * rdx: Function argument (can be NULL if none)
1045 SYM_FUNC_START(asm_call_on_stack)
1047 * Save the frame pointer unconditionally. This allows the ORC
1048 * unwinder to handle the stack switch.
1054 * The unwinder relies on the word at the top of the new stack
1055 * page linking back to the previous RSP.
1059 /* Move the argument to the right place */
1063 .pushsection .discard.instr_begin
1070 .pushsection .discard.instr_end
1074 /* Restore the previous stack pointer from RBP. */
1077 SYM_FUNC_END(asm_call_on_stack)
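/*
 * For illustration only: the C-side declaration is roughly
 *
 *	void asm_call_on_stack(void *stack, void (*func)(void), void *arg);
 *
 * with @stack pointing at the top word of the target stack; @arg is
 * forwarded in %rdi (see "Move the argument to the right place" above),
 * so callers that need an argument cast @func accordingly.
 */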
1079 #ifdef CONFIG_XEN_PV
1081 * A note on the "critical region" in our callback handler.
1082 * We want to avoid stacking callback handlers due to events occurring
1083 * during handling of the last event. To do this, we keep events disabled
1084 * until we've done all processing. HOWEVER, we must enable events before
1085 * popping the stack frame (can't be done atomically) and so it would still
1086 * be possible to get enough handler activations to overflow the stack.
1087 * Although unlikely, bugs of that kind are hard to track down, so we'd
1088 * like to avoid the possibility.
1089 * So, on entry to the handler we detect whether we interrupted an
1090 * existing activation in its critical region -- if so, we pop the current
1091 * activation and restart the handler using the previous one.
1093 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
1095 SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
1098 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
1099 * will see the correct pointer to the pt_regs
1102 movq %rdi, %rsp /* we don't return, adjust the stack frame */
1105 call xen_pv_evtchn_do_upcall
1108 SYM_CODE_END(exc_xen_hypervisor_callback)
1111 * Hypervisor uses this for application faults while it executes.
1112 * We get here for two reasons:
1113 * 1. Fault while reloading DS, ES, FS or GS
1114 * 2. Fault while executing IRET
1115 * Category 1 we do not need to fix up as Xen has already reloaded all segment
1116 * registers that could be reloaded and zeroed the others.
1117 * Category 2 we fix up by killing the current process. We cannot use the
1118 * normal Linux return path in this case because if we use the IRET hypercall
1119 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1120 * We distinguish between categories by comparing each saved segment register
1121 * with its current contents: any discrepancy means we are in category 1.
1123 SYM_CODE_START(xen_failsafe_callback)
1126 cmpw %cx, 0x10(%rsp)
1129 cmpw %cx, 0x18(%rsp)
1132 cmpw %cx, 0x20(%rsp)
1135 cmpw %cx, 0x28(%rsp)
1137 /* All segments match their saved values => Category 2 (Bad IRET). */
1142 UNWIND_HINT_IRET_REGS offset=8
1143 jmp asm_exc_general_protection
1144 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1148 UNWIND_HINT_IRET_REGS
1149 pushq $-1 /* orig_ax = -1 => not a system call */
1151 ENCODE_FRAME_POINTER
1153 SYM_CODE_END(xen_failsafe_callback)
1154 #endif /* CONFIG_XEN_PV */
1156 #ifdef CONFIG_XEN_PVHVM
1157 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1158 xen_hvm_callback_vector xen_evtchn_do_upcall
1162 #if IS_ENABLED(CONFIG_HYPERV)
1163 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1164 hyperv_callback_vector hyperv_vector_handler
1166 apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
1167 hyperv_reenlightenment_vector hyperv_reenlightenment_intr
1169 apicinterrupt3 HYPERV_STIMER0_VECTOR \
1170 hv_stimer0_callback_vector hv_stimer0_vector_handler
1171 #endif /* CONFIG_HYPERV */
1173 #if IS_ENABLED(CONFIG_ACRN_GUEST)
1174 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1175 acrn_hv_callback_vector acrn_hv_vector_handler
1179 * Save all registers in pt_regs, and switch gs if needed.
1180 * Use a slow but surefire "are we in kernel?" check.
1181 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1183 SYM_CODE_START_LOCAL(paranoid_entry)
1186 PUSH_AND_CLEAR_REGS save_ret=1
1187 ENCODE_FRAME_POINTER 8
1189 movl $MSR_GS_BASE, %ecx
1192 js 1f /* negative -> in kernel */
1198 * Always stash CR3 in %r14. This value will be restored,
1199 * verbatim, at exit. Needed if paranoid_entry interrupted
1200 * another entry that already switched to the user CR3 value
1201 * but has not yet returned to userspace.
1203 * This is also why CS (stashed in the "iret frame" by the
1204 * hardware at entry) can not be used: this may be a return
1205 * to kernel code, but with a user CR3 value.
1207 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
1210 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
1211 * unconditional CR3 write, even in the PTI case. So do an lfence
1212 * to prevent GS speculation, regardless of whether PTI is enabled.
1214 FENCE_SWAPGS_KERNEL_ENTRY
1217 SYM_CODE_END(paranoid_entry)
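/*
 * For illustration only: the GSBASE/EBX handshake above amounts to,
 * roughly,
 *
 *	if ((long)__rdmsr(MSR_GS_BASE) < 0) {	// kernel address => sign bit set
 *		ebx = 1;			// already on kernel GSBASE, no SWAPGS on exit
 *	} else {
 *		native_swapgs();
 *		ebx = 0;			// paranoid_exit must SWAPGS back
 *	}
 *
 * where __rdmsr()/native_swapgs() stand in for the raw instructions;
 * the real code only tests the sign of the high MSR half via the js.
 */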
1220 * "Paranoid" exit path from exception stack. This is invoked
1221 * only on return from non-NMI IST interrupts that came
1222 * from kernel space.
1224 * We may be returning to very strange contexts (e.g. very early
1225 * in syscall entry), so checking for preemption here would
1226 * be complicated. Fortunately, there's no good reason
1227 * to try to handle preemption here.
1229 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1231 SYM_CODE_START_LOCAL(paranoid_exit)
1233 DISABLE_INTERRUPTS(CLBR_ANY)
1234 TRACE_IRQS_OFF_DEBUG
1235 testl %ebx, %ebx /* swapgs needed? */
1236 jnz .Lparanoid_exit_no_swapgs
1238 /* Always restore stashed CR3 value (see paranoid_entry) */
1239 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1241 jmp restore_regs_and_return_to_kernel
1242 .Lparanoid_exit_no_swapgs:
1243 TRACE_IRQS_IRETQ_DEBUG
1244 /* Always restore stashed CR3 value (see paranoid_entry) */
1245 RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
1246 jmp restore_regs_and_return_to_kernel
1247 SYM_CODE_END(paranoid_exit)
1250 * Save all registers in pt_regs, and switch GS if needed.
1252 SYM_CODE_START_LOCAL(error_entry)
1255 PUSH_AND_CLEAR_REGS save_ret=1
1256 ENCODE_FRAME_POINTER 8
1257 testb $3, CS+8(%rsp)
1258 jz .Lerror_kernelspace
1261 * We entered from user mode or we're pretending to have entered
1262 * from user mode due to an IRET fault.
1265 FENCE_SWAPGS_USER_ENTRY
1266 /* We have user CR3. Change to kernel CR3. */
1267 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1269 .Lerror_entry_from_usermode_after_swapgs:
1270 /* Put us onto the real thread stack. */
1271 popq %r12 /* save return addr in %r12 */
1272 movq %rsp, %rdi /* arg0 = pt_regs pointer */
1274 movq %rax, %rsp /* switch stack */
1275 ENCODE_FRAME_POINTER
1279 .Lerror_entry_done_lfence:
1280 FENCE_SWAPGS_KERNEL_ENTRY
1285 * There are two places in the kernel that can potentially fault with
1286 * usergs. Handle them here. B stepping K8s sometimes report a
1287 * truncated RIP for IRET exceptions returning to compat mode. Check
1288 * for these here too.
1290 .Lerror_kernelspace:
1291 leaq native_irq_return_iret(%rip), %rcx
1292 cmpq %rcx, RIP+8(%rsp)
1294 movl %ecx, %eax /* zero extend */
1295 cmpq %rax, RIP+8(%rsp)
1297 cmpq $.Lgs_change, RIP+8(%rsp)
1298 jne .Lerror_entry_done_lfence
1301 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1302 * gsbase and proceed. We'll fix up the exception and land in
1303 * .Lgs_change's error handler with kernel gsbase.
1306 FENCE_SWAPGS_USER_ENTRY
1307 jmp .Lerror_entry_done
1310 /* Fix truncated RIP */
1311 movq %rcx, RIP+8(%rsp)
1316 * We came from an IRET to user mode, so we have user
1317 * gsbase and CR3. Switch to kernel gsbase and CR3:
1320 FENCE_SWAPGS_USER_ENTRY
1321 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1324 * Pretend that the exception came from user mode: set up pt_regs
1325 * as if we faulted immediately after IRET.
1330 jmp .Lerror_entry_from_usermode_after_swapgs
1331 SYM_CODE_END(error_entry)
1333 SYM_CODE_START_LOCAL(error_return)
1335 DEBUG_ENTRY_ASSERT_IRQS_OFF
1337 jz restore_regs_and_return_to_kernel
1338 jmp swapgs_restore_regs_and_return_to_usermode
1339 SYM_CODE_END(error_return)
1342 * Runs on exception stack. Xen PV does not go through this path at all,
1343 * so we can use real assembly here.
1346 * %r14: Used to save/restore the CR3 of the interrupted context
1347 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1349 SYM_CODE_START(asm_exc_nmi)
1350 UNWIND_HINT_IRET_REGS
1353 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1354 * the iretq it performs will take us out of NMI context.
1355 * This means that we can have nested NMIs where the next
1356 * NMI is using the top of the stack of the previous NMI. We
1357 * can't let it execute because the nested NMI will corrupt the
1358 * stack of the previous NMI. NMI handlers are not re-entrant
1361 * To handle this case we do the following:
1362 * Check a special location on the stack that contains
1363 * a variable that is set when NMIs are executing.
1364 * The interrupted task's stack is also checked to see if it is an NMI stack.
1366 * If the variable is not set and the stack is not the NMI stack, then:
1368 * o Set the special variable on the stack
1369 * o Copy the interrupt frame into an "outermost" location on the stack
1371 * o Copy the interrupt frame into an "iret" location on the stack
1372 * o Continue processing the NMI
1373 * If the variable is set or the previous stack is the NMI stack:
1374 * o Modify the "iret" location to jump to the repeat_nmi
1375 * o return back to the first NMI
1377 * Now on exit of the first NMI, we first clear the stack variable.
1378 * The NMI stack will tell any nested NMIs at that point that it is
1379 * nested. Then we pop the stack normally with iret, and if there was
1380 * a nested NMI that updated the copy interrupt stack frame, a
1381 * jump will be made to the repeat_nmi code that will handle the second NMI.
1384 * However, espfix prevents us from directly returning to userspace
1385 * with a single IRET instruction. Similarly, IRET to user mode
1386 * can fault. We therefore handle NMIs from user space like
1387 * other IST entries.
1392 /* Use %rdx as our temp variable throughout */
1395 testb $3, CS-RIP+8(%rsp)
1396 jz .Lnmi_from_kernel
1399 * NMI from user mode. We need to run on the thread stack, but we
1400 * can't go through the normal entry paths: NMIs are masked, and
1401 * we don't want to enable interrupts, because then we'll end
1402 * up in an awkward situation in which IRQs are on but NMIs are off.
1405 * We also must not push anything to the stack before switching
1406 * stacks lest we corrupt the "NMI executing" variable.
1411 FENCE_SWAPGS_USER_ENTRY
1412 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1414 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1415 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1416 pushq 5*8(%rdx) /* pt_regs->ss */
1417 pushq 4*8(%rdx) /* pt_regs->rsp */
1418 pushq 3*8(%rdx) /* pt_regs->flags */
1419 pushq 2*8(%rdx) /* pt_regs->cs */
1420 pushq 1*8(%rdx) /* pt_regs->rip */
1421 UNWIND_HINT_IRET_REGS
1422 pushq $-1 /* pt_regs->orig_ax */
1423 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1424 ENCODE_FRAME_POINTER
1427 * At this point we no longer need to worry about stack damage
1428 * due to nesting -- we're on the normal thread stack and we're
1429 * done with the NMI stack.
1437 * Return back to user mode. We must *not* do the normal exit
1438 * work, because we don't want to enable interrupts.
1440 jmp swapgs_restore_regs_and_return_to_usermode
1444 * Here's what our stack frame will look like:
1445 * +---------------------------------------------------------+
1446 * | original SS                                              |
1447 * | original Return RSP                                      |
1448 * | original RFLAGS                                          |
1449 * | original CS                                              |
1450 * | original RIP                                             |
1451 * +---------------------------------------------------------+
1452 * | temp storage for rdx                                     |
1453 * +---------------------------------------------------------+
1454 * | "NMI executing" variable                                 |
1455 * +---------------------------------------------------------+
1456 * | iret SS          } Copied from "outermost" frame         |
1457 * | iret Return RSP  } on each loop iteration; overwritten   |
1458 * | iret RFLAGS      } by a nested NMI to force another      |
1459 * | iret CS          } iteration if needed.                  |
1460 * | iret RIP         }                                       |
1461 * +---------------------------------------------------------+
1462 * | outermost SS          } initialized in first_nmi;        |
1463 * | outermost Return RSP  } will not be changed before       |
1464 * | outermost RFLAGS      } NMI processing is done.          |
1465 * | outermost CS          } Copied to "iret" frame on each   |
1466 * | outermost RIP         } iteration.                       |
1467 * +---------------------------------------------------------+
1468 * | pt_regs                                                  |
1469 * +---------------------------------------------------------+
1471 * The "original" frame is used by hardware. Before re-enabling
1472 * NMIs, we need to be done with it, and we need to leave enough
1473 * space for the asm code here.
1475 * We return by executing IRET while RSP points to the "iret" frame.
1476 * That will either return for real or it will loop back into NMI processing.
1479 * The "outermost" frame is copied to the "iret" frame on each
1480 * iteration of the loop, so each iteration starts with the "iret"
1481 * frame pointing to the final return target.
1485 * Determine whether we're a nested NMI.
1487 * If we interrupted kernel code between repeat_nmi and
1488 * end_repeat_nmi, then we are a nested NMI. We must not
1489 * modify the "iret" frame because it's being written by
1490 * the outer NMI. That's okay; the outer NMI handler is
1491 * about to call exc_nmi() anyway, so we can just
1492 * resume the outer NMI.
1495 movq $repeat_nmi, %rdx
1498 movq $end_repeat_nmi, %rdx
1504 * Now check "NMI executing". If it's set, then we're nested.
1505 * This will not detect if we interrupted an outer NMI just before IRET.
1512 * Now test if the previous stack was an NMI stack. This covers
1513 * the case where we interrupt an outer NMI after it clears
1514 * "NMI executing" but before IRET. We need to be careful, though:
1515 * there is one case in which RSP could point to the NMI stack
1516 * despite there being no NMI active: naughty userspace controls
1517 * RSP at the very beginning of the SYSCALL targets. We can
1518 * pull a fast one on naughty userspace, though: we program
1519 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1520 * if it controls the kernel's RSP. We set DF before we clear "NMI executing".
1524 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1525 cmpq %rdx, 4*8(%rsp)
1526 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1529 subq $EXCEPTION_STKSZ, %rdx
1530 cmpq %rdx, 4*8(%rsp)
1531 /* If it is below the NMI stack, it is a normal NMI */
1534 /* Ah, it is within the NMI stack. */
1536 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1537 jz first_nmi /* RSP was user controlled. */
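/*
 * For illustration only (names are illustrative): the two compares
 * above implement, roughly,
 *
 *	bool on_nmi_stack = prev_rsp <= nmi_stack_top &&
 *			    prev_rsp >= nmi_stack_top - EXCEPTION_STKSZ;
 *
 * and the DF test just above filters out the one case where userspace
 * could have pointed RSP into that range itself at SYSCALL entry:
 * SYSCALL masks DF, and the NMI exit path sets DF before clearing
 * "NMI executing".
 */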
1539 /* This is a nested NMI. */
1543 * Modify the "iret" frame to point to repeat_nmi, forcing another
1544 * iteration of NMI handling.
1547 leaq -10*8(%rsp), %rdx
1554 /* Put stack back */
1560 /* We are returning to kernel mode, so this cannot result in a fault. */
1567 /* Make room for "NMI executing". */
1570 /* Leave room for the "iret" frame */
1573 /* Copy the "original" frame to the "outermost" frame */
1577 UNWIND_HINT_IRET_REGS
1579 /* Everything up to here is safe from nested NMIs */
1581 #ifdef CONFIG_DEBUG_ENTRY
1583 * For ease of testing, unmask NMIs right away. Disabled by
1584 * default because IRET is very expensive.
1587 pushq %rsp /* RSP (minus 8 because of the previous push) */
1588 addq $8, (%rsp) /* Fix up RSP */
1590 pushq $__KERNEL_CS /* CS */
1592 iretq /* continues at repeat_nmi below */
1593 UNWIND_HINT_IRET_REGS
1599 * If there was a nested NMI, the first NMI's iret will return
1600 * here. But NMIs are still enabled and we can take another
1601 * nested NMI. The nested NMI checks the interrupted RIP to see
1602 * if it is between repeat_nmi and end_repeat_nmi, and if so
1603 * it will just return, as we are about to repeat an NMI anyway.
1604 * This makes it safe to copy to the stack frame that a nested NMI will update.
1607 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1608 * we're repeating an NMI, gsbase has the same value that it had on
1609 * the first iteration. paranoid_entry will load the kernel
1610 * gsbase if needed before we call exc_nmi(). "NMI executing"
1613 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1616 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1617 * here must not modify the "iret" frame while we're writing to
1618 * it or it will end up containing garbage.
1628 * Everything below this point can be preempted by a nested NMI.
1629 * If this happens, then the inner NMI will change the "iret"
1630 * frame to point back to repeat_nmi.
1632 pushq $-1 /* ORIG_RAX: no syscall to restart */
1635 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1636 * as we should not be calling schedule in NMI context.
1637 * Even with normal interrupts enabled. An NMI should not be
1638 * setting NEED_RESCHED or anything that normal interrupts and
1639 * exceptions might do.
1644 /* paranoidentry exc_nmi(), 0; without TRACE_IRQS_OFF */
1649 /* Always restore stashed CR3 value (see paranoid_entry) */
1650 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1652 testl %ebx, %ebx /* swapgs needed? */
1660 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1661 * frame.
1666 * Clear "NMI executing". Set DF first so that we can easily
1667 * distinguish the remaining code between here and IRET from
1668 * the SYSCALL entry and exit paths.
1670 * We arguably should just inspect RIP instead, but I (Andy) wrote
1671 * this code when I had the misapprehension that Xen PV supported
1672 * NMIs, and Xen PV would break that approach.
1675 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1678 * iretq reads the "iret" frame and exits the NMI stack in a
1679 * single instruction. We are returning to kernel mode, so this
1680 * cannot result in a fault. Similarly, we don't need to worry
1681 * about espfix64 on the way back to kernel mode.
1684 SYM_CODE_END(asm_exc_nmi)
1686 #ifndef CONFIG_IA32_EMULATION
1688 * This handles SYSCALL from 32-bit code. There is no way to program
1689 * MSRs to fully disable 32-bit SYSCALL.
1691 SYM_CODE_START(ignore_sysret)
1695 SYM_CODE_END(ignore_sysret)
1698 .pushsection .text, "ax"
1699 SYM_CODE_START(rewind_stack_do_exit)
1701 /* Prevent any naive code from trying to unwind to our caller. */
1704 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
1705 leaq -PTREGS_SIZE(%rax), %rsp
1709 SYM_CODE_END(rewind_stack_do_exit)