/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
#include <asm/thread_info.h>
/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
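
/*
 * Worked example, assuming the default (non-randomized) 4-level layout
 * where __PAGE_OFFSET_BASE_L4 == 0xffff888000000000 and
 * __START_KERNEL_map == 0xffffffff80000000:
 *
 *	L4_PAGE_OFFSET  = (0xffff888000000000 >> 39) & 511 = 273
 *	L4_START_KERNEL = (0xffffffff80000000 >> 39) & 511 = 511
 *	L3_START_KERNEL = (0xffffffff80000000 >> 30) & 511 = 510
 */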
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us. These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
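
	/*
	 * Illustration with hypothetical numbers: if the kernel was compiled
	 * to run at physical 0x1000000 but the bootloader placed it at
	 * 0x4000000, the fixup below adds the 0x3000000 delta to the
	 * physical addresses stored in the early page tables; the delta is
	 * also kept in phys_base.
	 */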
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi
	/* Set up GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	call	startup_64_setup_env
	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif
	/* Sanitize CPU configuration */
	call verify_cpu
	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64
	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi
	mov	%rax, %r14

	addq	phys_base(%rip), %rdi

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit

	/*
	 * Restore the CR3 value without the phys_base, which will be added
	 * below, before writing %cr3.
	 */
	mov	%r14, %rax
#endif

	jmp	1f
SYM_CODE_END(startup_64)
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Sanitize CPU configuration */
	call verify_cpu
	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	/* Clear %R15 which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d
	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif
	/* Enable PAE mode, PSE, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testb	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4
	/* Set up early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	/*
	 * Switch to the new page tables.
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity-mapped ranges.
	 */
	movq	%rax, %cr3
	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above
#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bits 0-23	CPU# if STARTUP_xx flags are not set
	 */
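
	/*
	 * Worked examples (illustrative values only): smpboot_control == 5
	 * requests direct bringup of CPU 5, while smpboot_control ==
	 * 0x80000000 (STARTUP_READ_APICID, bit 31) requests reading the
	 * APIC ID and looking up the CPU number in cpuid_to_apicid below.
	 */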
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. CPU number is provided
	 * in bits 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr
#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode then the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't be
	 * handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif
.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shrl	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr
.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr
	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b
.Lsetup_cpu:
	/* Get the per cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */
	/*
	 * Set up a boot-time stack - any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp
	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running at. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
	/* Set up data segments */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es
	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs
	/*
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses the init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr
	/* Set up and load the IDT */
	call	early_setup_idt
	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi
	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Zero EFLAGS after setting %rsp */
	pushq	$0
	popfq

	/* Pass the boot_params pointer as first argument */
	movq	%r15, %rdi
.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
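	/*
	 * The push/push/lretq sequence below therefore emulates a far jump
	 * with a 64-bit offset: it pushes __KERNEL_CS and the 64-bit target
	 * address, then "returns" through both, reloading %cs and %rip in
	 * one step.
	 */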
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S"
#include "sev_verify_cbit.S"
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call the C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif
	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0)
	__FINITDATA
	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
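
/*
 * Layout sketch (assuming EARLY_IDT_HANDLER_SIZE from <asm/segment.h>):
 * each vector's stub above - the two pushes plus the jmp, padded out with
 * 0xcc bytes - occupies a fixed EARLY_IDT_HANDLER_SIZE stride, so stub N
 * sits at early_idt_handler_array + N * EARLY_IDT_HANDLER_SIZE.
 */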
SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS
	movq	%rsp, %rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call the C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif
#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif
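
/*
 * Arithmetic check: one PGD page is 512 entries * 8 bytes = 4k, so the
 * PTI_USER_PGD_FILL (512) extra quadwords appended to each PGD below add
 * a second 4k page, producing the 8k PGDs described above.
 */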
/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
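
/*
 * Example expansion (hypothetical invocation): with PMD_SHIFT == 21,
 * PMDS(0, perm, 4) emits four .quad entries for the 2M-aligned physical
 * addresses 0x0, 0x200000, 0x400000 and 0x600000, each OR'ed with perm.
 */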
	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)
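
/*
 * Size check: each of the EARLY_DYNAMIC_PAGE_TABLES tables reserved above
 * is 512 entries * 8 bytes = one 4k page.
 */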
SYM_DATA(early_recursion_flag, .long 0)
#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled. But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif
#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif
SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
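
/*
 * Size check: with the values quoted in the comment above (a 1 GiB
 * KERNEL_IMAGE_SIZE under RANDOMIZE_BASE and 2 MiB PMDs), PMDS() emits
 * 1 GiB / 2 MiB = 512 entries, exactly filling this one-page table.
 */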
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
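
/*
 * Layout check (assuming FIXMAP_PMD_NUM == 2, per <asm/fixmap.h>):
 * (512 - 4 - 2) empty entries + 2 fixmap PMDs + 4 reserved entries = 512,
 * one full page table.
 */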
SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)
SYM_DATA(smpboot_control,		.long 0)
	/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)