Merge tag 'x86-fsgsbase-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cb22f33bf1d8041d83738ac86b1e3afd1ef6c6ab..559634b59d2a25c0a0e48eebfb2464ead9b6fdba 100644
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/trace_events.h>
+#include <linux/entry-kvm.h>
 
 #include <asm/apic.h>
 #include <asm/asm.h>
@@ -133,9 +134,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON                           \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS                                     \
-       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
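The CR4 mask deleted here is not dropped; it moves out of vmx.c into a definition shared with SVM, which the hunks below consume as KVM_POSSIBLE_CR4_GUEST_BITS. A sketch of the shared macros, reproduced from memory of arch/x86/kvm/kvm_cache_regs.h (the exact location and bit list are assumptions; the notable difference is that X86_CR4_PGE is now part of the superset):

/* Sketch only: CR0/CR4 bits a guest may be allowed to own, VMX and SVM alike. */
#define KVM_POSSIBLE_CR0_GUEST_BITS	X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)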
@@ -1172,7 +1170,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
        gs_base = cpu_kernelmode_gs_base(cpu);
        if (likely(is_64bit_mm(current->mm))) {
-               save_fsgs_for_kvm();
+               current_save_fsgs();
                fs_sel = current->thread.fsindex;
                gs_sel = current->thread.gsindex;
                fs_base = current->thread.fsbase;
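current_save_fsgs() replaces the KVM-specific save_fsgs_for_kvm() hook as part of the FSGSBASE enablement. For reference, a sketch of the exported helper in arch/x86/kernel/process_64.c, paraphrased from the x86-fsgsbase series (treat the details as approximate):

/* Sketch: snapshot the current task's FS/GS selectors and bases. */
void current_save_fsgs(void)
{
	unsigned long flags;

	/* Interrupts need to be off for FSGSBASE. */
	local_irq_save(flags);
	save_fsgs(current);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(current_save_fsgs);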
@@ -4034,9 +4032,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-       if (enable_ept)
-               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+       if (!enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
        if (is_guest_mode(&vmx->vcpu))
                vmx->vcpu.arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
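The EPT check is inverted on purpose: the shared superset already contains X86_CR4_PGE, so it is stripped when EPT is disabled, because without EPT a guest write to CR4.PGE must cause a VM exit so KVM can flush the shadow-paging TLB. A small equivalence sketch, under the assumption that the new macro equals the old per-VMX one plus X86_CR4_PGE (illustration only, not kernel source):

static __always_unused void cr4_mask_equivalence_sketch(void)
{
	unsigned long old = (KVM_POSSIBLE_CR4_GUEST_BITS & ~X86_CR4_PGE) |
			    (enable_ept ? X86_CR4_PGE : 0);
	unsigned long new = KVM_POSSIBLE_CR4_GUEST_BITS &
			    (enable_ept ? ~0UL : ~X86_CR4_PGE);

	/* Old formulation (add PGE when EPT) == new (strip PGE when !EPT). */
	WARN_ON_ONCE(old != new);
}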
@@ -4333,8 +4331,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-       vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
 
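The CR0 side is the same idea: the literal X86_CR0_TS becomes KVM_POSSIBLE_CR0_GUEST_BITS, and CR0_GUEST_HOST_MASK is derived from cr0_guest_owned_bits instead of repeating the constant. Keeping the two in sync matters because the register cache only trusts cached CR0 bits the guest does not own; a paraphrase of kvm_read_cr0_bits() from arch/x86/kvm/kvm_cache_regs.h (from memory, details approximate):

static inline unsigned long read_cr0_bits_sketch(struct kvm_vcpu *vcpu,
						 unsigned long mask)
{
	unsigned long tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	/* Guest-owned bits can change without a VM exit: refresh from the VMCS. */
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}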
@@ -5376,14 +5374,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                }
 
                /*
-                * Note, return 1 and not 0, vcpu_run() is responsible for
-                * morphing the pending signal into the proper return code.
+                * Note, return 1 and not 0, vcpu_run() will invoke
+                * xfer_to_guest_mode() which will create a proper return
+                * code.
                 */
-               if (signal_pending(current))
+               if (__xfer_to_guest_mode_work_pending())
                        return 1;
-
-               if (need_resched())
-                       schedule();
        }
 
        return 1;
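The open-coded signal_pending()/need_resched() handling is subsumed by the generic KVM entry/exit work machinery pulled in via the new <linux/entry-kvm.h> include above: the emulation loop only asks whether work is pending and bails out to vcpu_run(), which does the actual signal and reschedule handling through xfer_to_guest_mode(). A sketch of the pending-work check, paraphrased from include/linux/entry-kvm.h (the exact flag set is an assumption):

#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK	(0)
#endif

#define XFER_TO_GUEST_MODE_WORK					\
	(_TIF_NEED_RESCHED | _TIF_SIGPENDING |			\
	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)

/* Sketch: lockless check used from the emulation loop above. */
static inline bool __xfer_to_guest_mode_work_pending(void)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}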