diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f6915f10e584a5f568e2e6a38123ac70c4d3881c..4341175339f32a84bb96d9ec18031e4fadd9baa8 100644
@@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
        to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
                void *data)
 {
        struct kvm_tlb_range *range = data;
@@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                        return 1;
-               /* Otherwise falls through */
+               /* Else, falls through */
        default:
                msr = find_msr_entry(vmx, msr_info->index);
                if (msr) {
@@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                /* Check reserved bit, higher 32 bits should be zero */
                if ((data >> 32) != 0)
                        return 1;
-               /* Otherwise falls through */
+               /* Else, falls through */
        default:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
@@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                case 37: /* AAT100 */
                case 44: /* BC86,AAY89,BD102 */
                case 46: /* BA97 */
-                       _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+                       _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
                        _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
                        pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
                                        "does not work properly. Using workaround\n");
@@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
        vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
 {
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4, evmcs_rsp;
-
-       /* Record the guest's net vcpu time for enforced NMI injections. */
-       if (unlikely(!enable_vnmi &&
-                    vmx->loaded_vmcs->soft_vnmi_blocked))
-               vmx->loaded_vmcs->entry_time = ktime_get();
-
-       /* Don't enter VMX if guest state is invalid, let the exit handler
-          start emulation until we arrive back to a valid state */
-       if (vmx->emulation_required)
-               return;
-
-       if (vmx->ple_window_dirty) {
-               vmx->ple_window_dirty = false;
-               vmcs_write32(PLE_WINDOW, vmx->ple_window);
-       }
-
-       if (vmx->nested.need_vmcs12_sync)
-               nested_sync_from_vmcs12(vcpu);
-
-       if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
-               vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
-       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
-               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
-
-       cr4 = cr4_read_shadow();
-       if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
-               vmcs_writel(HOST_CR4, cr4);
-               vmx->loaded_vmcs->host_state.cr4 = cr4;
-       }
-
-       /* When single-stepping over STI and MOV SS, we must clear the
-        * corresponding interruptibility bits in the guest state. Otherwise
-        * vmentry fails as it then expects bit 14 (BS) in pending debug
-        * exceptions being set, but that's not correct for the guest debugging
-        * case. */
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               vmx_set_interrupt_shadow(vcpu, 0);
-
-       if (static_cpu_has(X86_FEATURE_PKU) &&
-           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-           vcpu->arch.pkru != vmx->host_pkru)
-               __write_pkru(vcpu->arch.pkru);
-
-       pt_guest_enter(vmx);
-
-       atomic_switch_perf_msrs(vmx);
-
-       vmx_update_hv_timer(vcpu);
-
-       /*
-        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-        * it's non-zero. Since vmentry is serialising on affected CPUs, there
-        * is no need to worry about the conditional branch over the wrmsr
-        * being speculatively taken.
-        */
-       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+       unsigned long evmcs_rsp;
 
        vmx->__launched = vmx->loaded_vmcs->launched;
 
@@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                , "eax", "ebx", "edi"
 #endif
              );
+}
+STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       unsigned long cr3, cr4;
+
+       /* Record the guest's net vcpu time for enforced NMI injections. */
+       if (unlikely(!enable_vnmi &&
+                    vmx->loaded_vmcs->soft_vnmi_blocked))
+               vmx->loaded_vmcs->entry_time = ktime_get();
+
+       /* Don't enter VMX if guest state is invalid, let the exit handler
+          start emulation until we arrive back to a valid state */
+       if (vmx->emulation_required)
+               return;
+
+       if (vmx->ple_window_dirty) {
+               vmx->ple_window_dirty = false;
+               vmcs_write32(PLE_WINDOW, vmx->ple_window);
+       }
+
+       if (vmx->nested.need_vmcs12_sync)
+               nested_sync_from_vmcs12(vcpu);
+
+       if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+       if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+               vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               vmx->loaded_vmcs->host_state.cr3 = cr3;
+       }
+
+       cr4 = cr4_read_shadow();
+       if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+               vmcs_writel(HOST_CR4, cr4);
+               vmx->loaded_vmcs->host_state.cr4 = cr4;
+       }
+
+       /* When single-stepping over STI and MOV SS, we must clear the
+        * corresponding interruptibility bits in the guest state. Otherwise
+        * vmentry fails as it then expects bit 14 (BS) in pending debug
+        * exceptions being set, but that's not correct for the guest debugging
+        * case. */
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               vmx_set_interrupt_shadow(vcpu, 0);
+
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+           vcpu->arch.pkru != vmx->host_pkru)
+               __write_pkru(vcpu->arch.pkru);
+
+       pt_guest_enter(vmx);
+
+       atomic_switch_perf_msrs(vmx);
+
+       vmx_update_hv_timer(vcpu);
+
+       /*
+        * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+        * it's non-zero. Since vmentry is serialising on affected CPUs, there
+        * is no need to worry about the conditional branch over the wrmsr
+        * being speculatively taken.
+        */
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+       __vmx_vcpu_run(vcpu, vmx);
 
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_recover_nmi_blocking(vmx);
        vmx_complete_interrupts(vmx);
 }
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {