KVM: nVMX: initialize descriptor cache fields in prepare_vmcs02_full
author     Paolo Bonzini <pbonzini@redhat.com>
           Wed, 20 Dec 2017 12:56:53 +0000 (13:56 +0100)
committer  Radim Krčmář <rkrcmar@redhat.com>
           Tue, 16 Jan 2018 15:50:17 +0000 (16:50 +0100)
This part is separate for ease of review: git's diff heuristics prefer to
present the change as moving prepare_vmcs02 below the initial long sequence
of vmcs_write* operations, so splitting it out keeps the diff readable.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
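
For readers following the series, the shape this patch leaves behind is
roughly the condensed sketch below.  It is an illustration, not the kernel
code itself: the field lists are abbreviated, and the direct call from
prepare_vmcs02 to prepare_vmcs02_full is an assumption based on the
surrounding series (this excerpt does not show the call site, and a later
patch gates it so the writes can be skipped when L1 has not modified
vmcs12).

	/* Sketch only: the non-shadowed descriptor cache fields.  L1 can
	 * change these only through a VMWRITE that causes a vmexit, so
	 * rewriting them can eventually be skipped when vmcs12 is clean.
	 */
	static void prepare_vmcs02_full(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					bool from_vmentry)
	{
		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
		/* ... the other non-shadowed selector/limit/AR/base fields ... */
		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
	}

	static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
				  bool from_vmentry, u32 *entry_failure_code)
	{
		/* Shadowed fields first: with a shadow VMCS, L1 can VMWRITE
		 * these without a vmexit, so they must be refreshed on every
		 * nested entry.  Keep in sync with vmx_shadow_fields.h.
		 */
		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
		vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
		vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
		vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
		vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);

		prepare_vmcs02_full(vcpu, vmcs12, from_vmentry); /* assumed call site */

		/* ... the rest of prepare_vmcs02 ... */
		return 0;
	}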
arch/x86/kvm/vmx.c

index 1cc787f639726e738eac14da350658c85c70edd5..f83f5e7bf87a43a87db37d1f5cd657dba660cf47 100644
@@ -10317,28 +10317,10 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 
 static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                               bool from_vmentry)
-{
-}
-
-/*
- * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
- * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
- * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
- * guest in a way that will both be appropriate to L1's requests, and our
- * needs. In addition to modifying the active vmcs (which is vmcs02), this
- * function also has additional necessary side-effects, like setting various
- * vcpu->arch fields.
- * Returns 0 on success, 1 on failure. Invalid state exit qualification code
- * is assigned to entry_failure_code on failure.
- */
-static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-                         bool from_vmentry, u32 *entry_failure_code)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       u32 exec_control, vmcs12_exec_ctrl;
 
        vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
-       vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
        vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
        vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
        vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
@@ -10346,7 +10328,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
        vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
        vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
-       vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
        vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
        vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
        vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
@@ -10356,15 +10337,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
        vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
        vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
-       vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
        vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
        vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
        vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
        vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
        vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
        vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
-       vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
-       vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
        vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
        vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
        vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
@@ -10373,6 +10351,40 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
        vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
        vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that will both be appropriate to L1's requests, and our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+                         bool from_vmentry, u32 *entry_failure_code)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 exec_control, vmcs12_exec_ctrl;
+
+       /*
+        * First, the fields that are shadowed.  This must be kept in sync
+        * with vmx_shadow_fields.h.
+        */
+
+       vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+       vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+       vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+       vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+       vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+
+       /*
+        * Not in vmcs02: GUEST_PML_INDEX, HOST_FS_SELECTOR, HOST_GS_SELECTOR,
+        * HOST_FS_BASE, HOST_GS_BASE.
+        */
 
        if (from_vmentry &&
            (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
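
The "kept in sync with vmx_shadow_fields.h" comment added above refers to
the header that enumerates which VMCS fields are backed by the shadow VMCS.
As a rough, assumed illustration of that sync obligation (an abbreviated
excerpt in the style of arch/x86/kvm/vmx_shadow_fields.h, not a verbatim
copy), the five shadowed fields written at the top of prepare_vmcs02 would
each appear there as read/write shadow entries:

	/* Abbreviated, illustrative excerpt; the real header lists many more
	 * SHADOW_FIELD_RO()/SHADOW_FIELD_RW() entries.
	 */
	SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
	SHADOW_FIELD_RW(GUEST_CS_LIMIT)
	SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
	SHADOW_FIELD_RW(GUEST_ES_BASE)
	SHADOW_FIELD_RW(GUEST_CS_BASE)

The distinction matters because the CPU services L1's VMREAD/VMWRITE of
shadowed fields without a vmexit, so KVM cannot trap their modification;
prepare_vmcs02 therefore rewrites them on every nested entry, while the
fields moved into prepare_vmcs02_full can only change via an exiting
VMWRITE and are candidates for being skipped when vmcs12 is unchanged.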