KVM: VMX: Add error handling to VMREAD helper
author		Sean Christopherson <sean.j.christopherson@intel.com>
		Fri, 19 Jul 2019 20:41:08 +0000 (13:41 -0700)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Wed, 25 Sep 2019 13:30:09 +0000 (15:30 +0200)
Now that VMREAD flows require a taken branch, courtesy of commit

  3901336ed9887 ("x86/kvm: Don't call kvm_spurious_fault() from .fixup")

bite the bullet and add full error handling to VMREAD, i.e. replace the
JMP added by __ex()/____kvm_handle_fault_on_reboot() with a hinted Jcc.
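
For background on the hinted Jcc: VMREAD reports failure through the
flags, setting CF for VMfailInvalid and ZF for VMfailValid, and clears
both on success, so a single "ja" (taken iff CF=0 and ZF=0) covers the
success case.  The 0x3e byte in front of it is the legacy branch-taken
hint prefix for Jcc (0x2e would hint not-taken).  A minimal,
illustrative-only sketch of the pattern, with a nop standing in for the
error-handling code:

  static inline void hinted_ja_sketch(void)
  {
          asm volatile(".byte 0x3e\n\t"   /* branch taken hint prefix */
                       "ja 1f\n\t"        /* taken when CF=0 && ZF=0 */
                       "nop\n\t"          /* error handling would go here */
                       "1:");
  }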

To minimize the code footprint, add a helper function, vmread_error(),
to handle both faults and failures so that the inline flow has a single
CALL.
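
As a rough illustration (not the actual code), the flow the inline asm
implements is approximately the following, where vmread_insn() and
vmread_faulted() are made-up stand-ins for the VMREAD instruction and
the #UD/#GP fixup path:

  static unsigned long vmcs_readl_sketch(unsigned long field)
  {
          unsigned long value;

          if (vmread_insn(field, &value))         /* CF=ZF=0: success */
                  return value;                   /* the hinted "ja" fast path */

          /* both error paths funnel into the single CALL */
          vmread_error(field, vmread_faulted());  /* fault vs. VM-fail */
          return 0;                               /* value is zeroed on error */
  }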

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/ops.h
arch/x86/kvm/vmx/vmx.c

index 79e25d49d4d93e906d3c8e16d95b343b09c04e32..45eaedee2ac03b8e8131e82bf99c9c754423ab12 100644
@@ -11,9 +11,8 @@
 #include "vmcs.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
-#define __ex_clear(x, reg) \
-       ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
 
+asmlinkage void vmread_error(unsigned long field, bool fault);
 void vmwrite_error(unsigned long field, unsigned long value);
 void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
 void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
@@ -68,8 +67,22 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 {
        unsigned long value;
 
-       asm volatile (__ex_clear("vmread %1, %0", "%k0")
-                     : "=r"(value) : "r"(field));
+       asm volatile("1: vmread %2, %1\n\t"
+                    ".byte 0x3e\n\t" /* branch taken hint */
+                    "ja 3f\n\t"
+                    "mov %2, %%" _ASM_ARG1 "\n\t"
+                    "xor %%" _ASM_ARG2 ", %%" _ASM_ARG2 "\n\t"
+                    "2: call vmread_error\n\t"
+                    "xor %k1, %k1\n\t"
+                    "3:\n\t"
+
+                    ".pushsection .fixup, \"ax\"\n\t"
+                    "4: mov %2, %%" _ASM_ARG1 "\n\t"
+                    "mov $1, %%" _ASM_ARG2 "\n\t"
+                    "jmp 2b\n\t"
+                    ".popsection\n\t"
+                    _ASM_EXTABLE(1b, 4b)
+                    : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
        return value;
 }
 
index 52c7614b5ecd2acda7b432f53ff20a4f0e3bbf42..1852706e6acc2a9359ad36bcdeec6f6ab6001586 100644
@@ -349,6 +349,14 @@ do {                                       \
        pr_warn_ratelimited(fmt);       \
 } while (0)
 
+asmlinkage void vmread_error(unsigned long field, bool fault)
+{
+       if (fault)
+               kvm_spurious_fault();
+       else
+               vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
+}
+
 noinline void vmwrite_error(unsigned long field, unsigned long value)
 {
        vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",