KVM: x86: rename KVM_REQ_GET_VMCS12_PAGES
author: Paolo Bonzini <pbonzini@redhat.com>
Tue, 22 Sep 2020 10:53:57 +0000 (06:53 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Mon, 28 Sep 2020 11:58:49 +0000 (07:58 -0400)
We are going to use it for SVM too, so use a more generic name.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

index dc7a58b39faf7079bbe5e497f694abd775e0439e..d0f77235da923b5c084e452730e06461c72ff79f 100644 (file)
@@ -80,7 +80,7 @@
 #define KVM_REQ_HV_EXIT                        KVM_ARCH_REQ(21)
 #define KVM_REQ_HV_STIMER              KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP       KVM_ARCH_REQ(23)
-#define KVM_REQ_GET_VMCS12_PAGES       KVM_ARCH_REQ(24)
+#define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
 #define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
@@ -1261,7 +1261,7 @@ struct kvm_x86_nested_ops {
        int (*set_state)(struct kvm_vcpu *vcpu,
                         struct kvm_nested_state __user *user_kvm_nested_state,
                         struct kvm_nested_state *kvm_state);
-       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
        int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
        int (*enable_evmcs)(struct kvm_vcpu *vcpu,
index 91ce6e642db2972c48ea633971e486bc4753eef0..6eca8a7deed19a2f7bc7ce1dbf5be0457e0b4300 100644 (file)
@@ -285,7 +285,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
-       kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
@@ -3395,7 +3395,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
                 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
                 * have already been set at vmentry time and should not be reset.
                 */
-               kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        }
 
        /*
@@ -6192,7 +6192,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                 * restored yet. EVMCS will be mapped from
                 * nested_get_vmcs12_pages().
                 */
-               kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        } else {
                return -EINVAL;
        }
@@ -6573,7 +6573,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
-       .get_vmcs12_pages = nested_get_vmcs12_pages,
+       .get_nested_state_pages = nested_get_vmcs12_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
index 72f91f3640f3e5becfa22f6b7624060d6cf4e444..411f6103532b68efe585caf3fad21e9eff824153 100644 (file)
@@ -8640,8 +8640,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
-               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
-                       if (unlikely(!kvm_x86_ops.nested_ops->get_vmcs12_pages(vcpu))) {
+               if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+                       if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }