KVM: VMX: Rename vcpu_vmx's "save_nmsrs" to "nr_active_uret_msrs"
author Sean Christopherson <sean.j.christopherson@intel.com>
Wed, 23 Sep 2020 18:04:00 +0000 (11:04 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 28 Sep 2020 11:57:57 +0000 (07:57 -0400)
Add "uret" into the name of "save_nmsrs" to explicitly associate it with
the guest_uret_msrs array, and replace "save" with "active" (for lack of
a better word) to better describe what is being tracked.  While "save"
is more or less accurate when viewed as a literal description of the
field, e.g. it holds the number of MSRs that were saved into the array
the last time setup_msrs() was invoked, it can easily be misinterpreted
by the reader, e.g. as meaning the number of MSRs that were saved from
hardware at some point in the past, or as the number of MSRs that need
to be saved at some point in the future, both of which are wrong.

No functional change intended.
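
As a concrete illustration of the intended meaning, below is a minimal
userspace sketch (not kernel code): guest_uret_msrs[] holds every
supported user-return MSR, setup_msrs() compacts the entries the current
guest configuration actually needs to the front of the array and records
that count, and only that prefix is loaded when switching to the guest.
The toy_* names, struct layout, and MSR selection are illustrative
assumptions, not the real KVM definitions.

/*
 * Standalone illustration (userspace, not kernel code).  Struct and
 * function names are simplified stand-ins for the KVM/VMX originals.
 */
#include <stdio.h>

#define MAX_NR_USER_RETURN_MSRS 4

struct uret_msr {
	unsigned int index;		/* MSR number */
	unsigned long long data;	/* value to load for the guest */
	int active;			/* needed by the current guest config? */
};

struct toy_vmx {
	struct uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	int nr_uret_msrs;		/* total supported entries */
	int nr_active_uret_msrs;	/* entries actually loaded on switch */
};

/* Mimics setup_msrs(): move the active entries to the front of the
 * array and record how many there are.  Only this prefix is touched
 * when switching to the guest. */
static void toy_setup_msrs(struct toy_vmx *vmx)
{
	int i, nr_active = 0;

	for (i = 0; i < vmx->nr_uret_msrs; i++) {
		if (vmx->guest_uret_msrs[i].active) {
			struct uret_msr tmp = vmx->guest_uret_msrs[nr_active];

			vmx->guest_uret_msrs[nr_active] = vmx->guest_uret_msrs[i];
			vmx->guest_uret_msrs[i] = tmp;
			nr_active++;
		}
	}
	vmx->nr_active_uret_msrs = nr_active;
}

/* Mimics vmx_prepare_switch_to_guest(): only the active prefix is
 * handed to the (stand-in) user-return MSR machinery. */
static void toy_switch_to_guest(struct toy_vmx *vmx)
{
	int i;

	for (i = 0; i < vmx->nr_active_uret_msrs; i++)
		printf("load MSR 0x%x = 0x%llx\n",
		       vmx->guest_uret_msrs[i].index,
		       vmx->guest_uret_msrs[i].data);
}

int main(void)
{
	struct toy_vmx vmx = {
		.guest_uret_msrs = {
			{ 0xc0000081, 0x10, 1 },	/* MSR_STAR, needed */
			{ 0xc0000103, 0x20, 0 },	/* MSR_TSC_AUX, guest lacks RDTSCP */
			{ 0xc0000080, 0x30, 1 },	/* MSR_EFER, needed */
		},
		.nr_uret_msrs = 3,
	};

	toy_setup_msrs(&vmx);
	toy_switch_to_guest(&vmx);	/* loads only the two active MSRs */
	return 0;
}

With the above, toy_setup_msrs() leaves nr_active_uret_msrs == 2 and
toy_switch_to_guest() touches only MSR_STAR and MSR_EFER, mirroring how
setup_msrs() and vmx_prepare_switch_to_guest() cooperate in the patch
below.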

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923180409.32255-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

index 07249571f6d5d32ed1f2763f3b22e96ee23aa059..24087abf607766592392f6f727ef9dc1740c877c 100644
@@ -637,7 +637,7 @@ static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct vmx_uret_msr *msr, u64
 
        u64 old_msr_data = msr->data;
        msr->data = data;
-       if (msr - vmx->guest_uret_msrs < vmx->save_nmsrs) {
+       if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
                preempt_disable();
                ret = kvm_set_user_return_msr(msr->index, msr->data, msr->mask);
                preempt_enable();
@@ -1136,7 +1136,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
         */
        if (!vmx->guest_msrs_ready) {
                vmx->guest_msrs_ready = true;
-               for (i = 0; i < vmx->save_nmsrs; ++i)
+               for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
                        kvm_set_user_return_msr(vmx->guest_uret_msrs[i].index,
                                                vmx->guest_uret_msrs[i].data,
                                                vmx->guest_uret_msrs[i].mask);
@@ -1628,9 +1628,9 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-       int save_nmsrs, index;
+       int nr_active_uret_msrs, index;
 
-       save_nmsrs = 0;
+       nr_active_uret_msrs = 0;
 #ifdef CONFIG_X86_64
        /*
         * The SYSCALL MSRs are only needed on long mode guests, and only
@@ -1639,26 +1639,26 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
                index = __find_msr_index(vmx, MSR_STAR);
                if (index >= 0)
-                       move_msr_up(vmx, index, save_nmsrs++);
+                       move_msr_up(vmx, index, nr_active_uret_msrs++);
                index = __find_msr_index(vmx, MSR_LSTAR);
                if (index >= 0)
-                       move_msr_up(vmx, index, save_nmsrs++);
+                       move_msr_up(vmx, index, nr_active_uret_msrs++);
                index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
                if (index >= 0)
-                       move_msr_up(vmx, index, save_nmsrs++);
+                       move_msr_up(vmx, index, nr_active_uret_msrs++);
        }
 #endif
        index = __find_msr_index(vmx, MSR_EFER);
        if (index >= 0 && update_transition_efer(vmx, index))
-               move_msr_up(vmx, index, save_nmsrs++);
+               move_msr_up(vmx, index, nr_active_uret_msrs++);
        index = __find_msr_index(vmx, MSR_TSC_AUX);
        if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
-               move_msr_up(vmx, index, save_nmsrs++);
+               move_msr_up(vmx, index, nr_active_uret_msrs++);
        index = __find_msr_index(vmx, MSR_IA32_TSX_CTRL);
        if (index >= 0)
-               move_msr_up(vmx, index, save_nmsrs++);
+               move_msr_up(vmx, index, nr_active_uret_msrs++);
 
-       vmx->save_nmsrs = save_nmsrs;
+       vmx->nr_active_uret_msrs = nr_active_uret_msrs;
        vmx->guest_msrs_ready = false;
 
        if (cpu_has_vmx_msr_bitmap())
index 82c39ac53165d6a05bab887ee70771847b275370..3928992de0237082576b3efa9deda19546ee319a 100644
@@ -198,7 +198,7 @@ struct vcpu_vmx {
 
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        int                   nr_uret_msrs;
-       int                   save_nmsrs;
+       int                   nr_active_uret_msrs;
        bool                  guest_msrs_ready;
 #ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;