x86/l1tf: Introduce vmx status variable
author	Thomas Gleixner <tglx@linutronix.de>
	Fri, 13 Jul 2018 14:23:16 +0000 (16:23 +0200)
committer	Thomas Gleixner <tglx@linutronix.de>
	Fri, 13 Jul 2018 14:29:53 +0000 (16:29 +0200)
Store the effective VMX L1D flush mitigation in a status variable and use
it to report the VMX state in the l1tf sysfs file.
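
For illustration, with KVM loaded, the conditional flush mode selected and
SMT enabled, the file would then read along these lines (strings taken from
the table added to bugs.c below):

    $ cat /sys/devices/system/cpu/vulnerabilities/l1tf
    Mitigation: PTE Inversion; VMX: SMT vulnerable, L1D conditional cache flushes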

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
arch/x86/include/asm/vmx.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 425e6b8b95478248dd3a32122b1aca408691cadf..c98aa9aed056e43335d378af904334d2351810dd 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -573,4 +573,13 @@ enum vm_instruction_error_number {
        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
 };
 
+enum vmx_l1d_flush_state {
+       VMENTER_L1D_FLUSH_AUTO,
+       VMENTER_L1D_FLUSH_NEVER,
+       VMENTER_L1D_FLUSH_COND,
+       VMENTER_L1D_FLUSH_ALWAYS,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
 #endif
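
The l1tf_vmx_states[] string table added to bugs.c below is indexed directly
by this enum, so the two must stay in sync whenever a state is added. A
hypothetical compile-time guard (not part of this patch) next to the string
table could pin that invariant down:

    /*
     * Hypothetical build-time check: the sysfs string table is indexed by
     * enum vmx_l1d_flush_state, so every enum entry needs a matching string.
     */
    _Static_assert(sizeof(l1tf_vmx_states) / sizeof(l1tf_vmx_states[0]) ==
                   VMENTER_L1D_FLUSH_ALWAYS + 1,
                   "l1tf_vmx_states[] out of sync with enum vmx_l1d_flush_state");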
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 50500cea6ebadb8cce60cf671ae8dd9b30740968..8aba7d3abccb503464d8db379b013dee4a52ba69 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -22,6 +22,7 @@
 #include <asm/processor-flags.h>
 #include <asm/fpu/internal.h>
 #include <asm/msr.h>
+#include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
@@ -657,6 +658,12 @@ void x86_spec_ctrl_setup_ap(void)
 
 #undef pr_fmt
 #define pr_fmt(fmt)    "L1TF: " fmt
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+#endif
+
 static void __init l1tf_select_mitigation(void)
 {
        u64 half_pa;
@@ -686,6 +693,32 @@ static void __init l1tf_select_mitigation(void)
 
 #ifdef CONFIG_SYSFS
 
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+       [VMENTER_L1D_FLUSH_AUTO]        = "auto",
+       [VMENTER_L1D_FLUSH_NEVER]       = "vulnerable",
+       [VMENTER_L1D_FLUSH_COND]        = "conditional cache flushes",
+       [VMENTER_L1D_FLUSH_ALWAYS]      = "cache flushes",
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+               return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+       return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
+                      cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
+                      l1tf_vmx_states[l1tf_vmx_mitigation]);
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+       return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -713,9 +746,8 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 
        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
-                       return sprintf(buf, "Mitigation: Page Table Inversion\n");
+                       return l1tf_show_state(buf);
                break;
-
        default:
                break;
        }
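
For context, cpu_show_common() is reached through the per-bug sysfs
wrappers; the existing l1tf entry point in bugs.c (unchanged by this patch)
just dispatches with the bug bit:

    ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr,
                          char *buf)
    {
            return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
    }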
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index eb7c207a3bc3e505a65e50fc12eb7120d7d5abbd..385cbfaa3c7189b57d1768c85f31426fe8fafb5f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -193,19 +193,13 @@ extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-/* These MUST be in sync with vmentry_l1d_param order. */
-enum vmx_l1d_flush_state {
-       VMENTER_L1D_FLUSH_NEVER,
-       VMENTER_L1D_FLUSH_COND,
-       VMENTER_L1D_FLUSH_ALWAYS,
-};
-
 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
 
 static const struct {
        const char *option;
        enum vmx_l1d_flush_state cmd;
 } vmentry_l1d_param[] = {
+       {"auto",        VMENTER_L1D_FLUSH_AUTO},
        {"never",       VMENTER_L1D_FLUSH_NEVER},
        {"cond",        VMENTER_L1D_FLUSH_COND},
        {"always",      VMENTER_L1D_FLUSH_ALWAYS},
@@ -13235,8 +13229,12 @@ static int __init vmx_setup_l1d_flush(void)
 {
        struct page *page;
 
+       if (!boot_cpu_has_bug(X86_BUG_L1TF))
+               return 0;
+
+       l1tf_vmx_mitigation = vmentry_l1d_flush;
+
        if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
-           !boot_cpu_has_bug(X86_BUG_L1TF) ||
            vmx_l1d_use_msr_save_list())
                return 0;
 
@@ -13251,12 +13249,14 @@ static int __init vmx_setup_l1d_flush(void)
        return 0;
 }
 
-static void vmx_free_l1d_flush_pages(void)
+static void vmx_cleanup_l1d_flush(void)
 {
        if (vmx_l1d_flush_pages) {
                free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
                vmx_l1d_flush_pages = NULL;
        }
+       /* Restore state so sysfs ignores VMX */
+       l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }
 
 static int __init vmx_init(void)
@@ -13299,7 +13299,7 @@ static int __init vmx_init(void)
        r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
                     __alignof__(struct vcpu_vmx), THIS_MODULE);
        if (r) {
-               vmx_free_l1d_flush_pages();
+               vmx_cleanup_l1d_flush();
                return r;
        }
 
@@ -13343,7 +13343,7 @@ static void __exit vmx_exit(void)
                static_branch_disable(&enable_evmcs);
        }
 #endif
-       vmx_free_l1d_flush_pages();
+       vmx_cleanup_l1d_flush();
 }
 
 module_init(vmx_init)
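
With the "auto" entry added to vmentry_l1d_param[], every state of the enum
can be requested on the command line, e.g.:

    # select the flush mode explicitly at module load time
    modprobe kvm_intel vmentry_l1d_flush=always

A minimal sketch of how the option table is matched against that string,
assuming the parameter handler is a plain strcmp() loop over
vmentry_l1d_param[] (the actual handler in vmx.c may differ in detail):

    static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
    {
            unsigned int i;

            if (!s)
                    return -EINVAL;

            /* Walk the option table; the first exact match wins. */
            for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
                    if (!strcmp(s, vmentry_l1d_param[i].option)) {
                            vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
                            return 0;
                    }
            }
            return -EINVAL;
    }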