x86/boot/compressed/64: Add stage1 #VC handler
author: Joerg Roedel <jroedel@suse.de>
Mon, 7 Sep 2020 13:15:20 +0000 (15:15 +0200)
committer: Borislav Petkov <bp@suse.de>
Mon, 7 Sep 2020 17:45:25 +0000 (19:45 +0200)
Add the first handler for #VC exceptions. At stage 1 there is no GHCB
yet because the kernel might still be running on the EFI page table.

The stage 1 handler is limited to the MSR-based protocol to talk to the
hypervisor and can only support CPUID exit-codes, but that is enough to
get to stage 2.

 [ bp: Zap superfluous newlines after rd/wrmsr instruction mnemonics. ]

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20200907131613.12703-20-joro@8bytes.org
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/idt_64.c
arch/x86/boot/compressed/idt_handlers_64.S
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/sev-es.c [new file with mode: 0644]
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/sev-es.h [new file with mode: 0644]
arch/x86/include/asm/trapnr.h
arch/x86/kernel/sev-es-shared.c [new file with mode: 0644]

index e7f3eba99ea2f644268cf35070b8da68a2bf66ce..38f4a52a4eda3533d32d1e8bc6aa57e74d1ff97b 100644 (file)
@@ -88,6 +88,7 @@ ifdef CONFIG_X86_64
        vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
        vmlinux-objs-y += $(obj)/mem_encrypt.o
        vmlinux-objs-y += $(obj)/pgtable_64.o
+       vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
index 5f083092a86de883bbf26f954fe52d49e660a352..f3ca7324be446fd579bc9debb9638e26e5b18a63 100644 (file)
@@ -32,6 +32,10 @@ void load_stage1_idt(void)
 {
        boot_idt_desc.address = (unsigned long)boot_idt;
 
+
+       if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+               set_idt_entry(X86_TRAP_VC, boot_stage1_vc);
+
        load_boot_idt(&boot_idt_desc);
 }
 
index b20e57504a94a57d767b4480ee00d778d5654ba5..92eb4df478a1720f6b0aa25590b3c96d1c609bae 100644 (file)
@@ -70,3 +70,7 @@ SYM_FUNC_END(\name)
        .code64
 
 EXCEPTION_HANDLER      boot_page_fault do_boot_page_fault error_code=1
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+EXCEPTION_HANDLER      boot_stage1_vc do_vc_no_ghcb error_code=1
+#endif
index 9840c82a39f134a5b564c296e381ae79e6901ffc..eaa8b45ebccbc952f58c511f410ea609f555ba32 100644 (file)
@@ -141,5 +141,6 @@ extern struct desc_ptr boot_idt_desc;
 
 /* IDT Entry Points */
 void boot_page_fault(void);
+void boot_stage1_vc(void);
 
 #endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
new file mode 100644 (file)
index 0000000..99c3bcd
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+/*
+ * misc.h needs to be first because it knows how to include the other kernel
+ * headers in the pre-decompression code in a way that does not break
+ * compilation.
+ */
+#include "misc.h"
+
+#include <asm/sev-es.h>
+#include <asm/msr-index.h>
+#include <asm/ptrace.h>
+#include <asm/svm.h>
+
+/*
+ * Read the current value of the GHCB MSR (MSR_AMD64_SEV_ES_GHCB).
+ *
+ * RDMSR returns the value split across EDX:EAX; recombine the two
+ * halves into a single u64. The shift relies on 'unsigned long' being
+ * 64 bits, which holds here since this file is x86-64-only boot code.
+ */
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+       unsigned long low, high;
+
+       asm volatile("rdmsr" : "=a" (low), "=d" (high) :
+                       "c" (MSR_AMD64_SEV_ES_GHCB));
+
+       /* high:low -> 64-bit MSR value */
+       return ((high << 32) | low);
+}
+
+/*
+ * Write @val to the GHCB MSR (MSR_AMD64_SEV_ES_GHCB).
+ *
+ * WRMSR takes the value split across EDX:EAX, so break the u64 into
+ * its two 32-bit halves first. The "memory" clobber orders the write
+ * against surrounding accesses (the hypervisor reads this MSR on the
+ * subsequent VMGEXIT).
+ */
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+       u32 low, high;
+
+       low  = val & 0xffffffffUL;
+       high = val >> 32;
+
+       asm volatile("wrmsr" : : "c" (MSR_AMD64_SEV_ES_GHCB),
+                       "a"(low), "d" (high) : "memory");
+}
+
+#undef __init
+#define __init
+
+/* Include code for early handlers */
+#include "../../kernel/sev-es-shared.c"
index 2859ee4f39a83fa4d73cc0f261e3effff7a455e7..da34fdba7c5a4d7a506045e93ad643d79ffd6bb7 100644 (file)
 #define MSR_AMD64_IBSBRTARGET          0xc001103b
 #define MSR_AMD64_IBSOPDATA4           0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SEV_ES_GHCB          0xc0010130
 #define MSR_AMD64_SEV                  0xc0010131
 #define MSR_AMD64_SEV_ENABLED_BIT      0
 #define MSR_AMD64_SEV_ENABLED          BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h
new file mode 100644 (file)
index 0000000..48a4403
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
+
+#ifndef __ASM_ENCRYPTED_STATE_H
+#define __ASM_ENCRYPTED_STATE_H
+
+#include <linux/types.h>
+
+#define GHCB_SEV_CPUID_REQ     0x004UL
+#define                GHCB_CPUID_REQ_EAX      0
+#define                GHCB_CPUID_REQ_EBX      1
+#define                GHCB_CPUID_REQ_ECX      2
+#define                GHCB_CPUID_REQ_EDX      3
+#define                GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
+                                       (((unsigned long)reg & 3) << 30) | \
+                                       (((unsigned long)fn) << 32))
+
+#define GHCB_SEV_CPUID_RESP    0x005UL
+#define GHCB_SEV_TERMINATE     0x100UL
+
+#define        GHCB_SEV_GHCB_RESP_CODE(v)      ((v) & 0xfff)
+#define        VMGEXIT()                       { asm volatile("rep; vmmcall\n\r"); }
+
+void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);
+
+/*
+ * Return the low @bits bits of @val.
+ *
+ * NOTE(review): assumes bits < 64 — a 64-bit shift count would be
+ * undefined behavior. The only caller in this patch passes 32.
+ */
+static inline u64 lower_bits(u64 val, unsigned int bits)
+{
+       u64 mask = (1ULL << bits) - 1;
+
+       return (val & mask);
+}
+
+#endif
index 082f45631fa955d3ee93aa60d8960e8bc41fa254..f5d2325aa0b749db0b6743adb85c94cb0f303624 100644 (file)
@@ -26,6 +26,7 @@
 #define X86_TRAP_XF            19      /* SIMD Floating-Point Exception */
 #define X86_TRAP_VE            20      /* Virtualization Exception */
 #define X86_TRAP_CP            21      /* Control Protection Exception */
+#define X86_TRAP_VC            29      /* VMM Communication Exception */
 #define X86_TRAP_IRET          32      /* IRET Exception */
 
 #endif
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
new file mode 100644 (file)
index 0000000..0bea323
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@suse.de>
+ *
+ * This file is not compiled stand-alone. It contains code shared
+ * between the pre-decompression boot code and the running Linux kernel
+ * and is included directly into both code-bases.
+ */
+
+/*
+ * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
+ * page yet, so it only supports the MSR based communication with the
+ * hypervisor and only the CPUID exit-code.
+ *
+ * Each CPUID register (EAX/EBX/ECX/EDX) is fetched with its own
+ * request/VMGEXIT/response round trip; any protocol error terminates
+ * the guest via GHCB_SEV_TERMINATE.
+ */
+void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+{
+       /* CPUID leaf number comes from the guest's EAX at the faulting CPUID */
+       unsigned int fn = lower_bits(regs->ax, 32);
+       unsigned long val;
+
+       /* Only CPUID is supported via MSR protocol */
+       if (exit_code != SVM_EXIT_CPUID)
+               goto fail;
+
+       /*
+        * Per GHCB_CPUID_REQ() the leaf goes in bits 63:32 and the
+        * requested register index in bits 31:30; the hypervisor's
+        * response carries the register value in the MSR's upper 32 bits.
+        */
+       sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
+       VMGEXIT();
+       val = sev_es_rd_ghcb_msr();
+       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+               goto fail;
+       regs->ax = val >> 32;
+
+       sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
+       VMGEXIT();
+       val = sev_es_rd_ghcb_msr();
+       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+               goto fail;
+       regs->bx = val >> 32;
+
+       sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
+       VMGEXIT();
+       val = sev_es_rd_ghcb_msr();
+       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+               goto fail;
+       regs->cx = val >> 32;
+
+       sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
+       VMGEXIT();
+       val = sev_es_rd_ghcb_msr();
+       if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+               goto fail;
+       regs->dx = val >> 32;
+
+       /* Skip over the CPUID two-byte opcode */
+       regs->ip += 2;
+
+       return;
+
+fail:
+       /* Ask the hypervisor to terminate the guest; there is no recovery */
+       sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
+       VMGEXIT();
+
+       /* Shouldn't get here - if we do halt the machine */
+       while (true)
+               asm volatile("hlt\n");
+}