Merge tag 'kvm-x86-pmu-6.5' of https://github.com/kvm-x86/linux into HEAD
arch/x86/kvm/x86.c
index abfba3cae0baae2c8e2fde7a717e2ff07f6bc570..7d6e044504482d9f2e136606fe4a4db13c684b57 100644 (file)
@@ -1017,13 +1017,11 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }
 
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-       if (static_cpu_has(X86_FEATURE_PKU) &&
+       if (cpu_feature_enabled(X86_FEATURE_PKU) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
                write_pkru(vcpu->arch.pkru);
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
@@ -1032,15 +1030,13 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_state_protected)
                return;
 
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-       if (static_cpu_has(X86_FEATURE_PKU) &&
+       if (cpu_feature_enabled(X86_FEATURE_PKU) &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                        write_pkru(vcpu->arch.host_pkru);
        }
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
        if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
@@ -1427,15 +1423,14 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
 
 /*
- * List of msr numbers which we expose to userspace through KVM_GET_MSRS
- * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
- *
- * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
- * extract the supported MSRs from the related const lists.
- * msrs_to_save is selected from the msrs_to_save_all to reflect the
- * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
- * may depend on host virtualization features rather than host cpu features.
+ * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
+ * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
+ * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.  msrs_to_save holds MSRs that
+ * require host support, i.e. should be probed via RDMSR.  emulated_msrs holds
+ * MSRs that KVM emulates without strictly requiring host support.
+ * msr_based_features holds MSRs that enumerate features, i.e. are effectively
+ * CPUID leafs.  Note, msr_based_features isn't mutually exclusive with
+ * msrs_to_save and emulated_msrs.
  */
 
 static const u32 msrs_to_save_base[] = {
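Per the comment above, msrs_to_save is populated at runtime by probing each entry of the const lists on the host. A hedged sketch of that filtering step, where the function name is hypothetical and the bare rdmsrl_safe() probe stands in for the real code's additional per-MSR feature checks:

    static void probe_msrs_to_save_sketch(void)
    {
            unsigned int i;
            u64 val;

            for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++) {
                    /* Drop MSRs that fault on this host; they can't be saved/restored. */
                    if (rdmsrl_safe(msrs_to_save_base[i], &val))
                            continue;
                    msrs_to_save[num_msrs_to_save++] = msrs_to_save_base[i];
            }
    }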
@@ -1535,11 +1530,11 @@ static const u32 emulated_msrs_all[] = {
        MSR_IA32_UCODE_REV,
 
        /*
-        * The following list leaves out MSRs whose values are determined
-        * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
-        * We always support the "true" VMX control MSRs, even if the host
-        * processor does not, so I am putting these registers here rather
-        * than in msrs_to_save_all.
+        * KVM always supports the "true" VMX control MSRs, even if the host
+        * does not.  The VMX MSRs as a whole are considered "emulated" as KVM
+        * doesn't strictly require them to exist in the host (ignoring that
+        * KVM would refuse to load in the first place if the core set of MSRs
+        * isn't supported).
         */
        MSR_IA32_VMX_BASIC,
        MSR_IA32_VMX_TRUE_PINBASED_CTLS,
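As background on why KVM can emulate the "true" control MSRs regardless of host support: each VMX control capability MSR is just a pair of 32-bit masks (SDM layout) that KVM synthesizes for the guest. An illustrative decode, not the nested-VMX code itself:

    /* SDM layout of a VMX control capability MSR (illustrative only). */
    static void vmx_ctl_msr_decode(u64 val, u32 *must_be_one, u32 *may_be_one)
    {
            *must_be_one = (u32)val;          /* allowed 0-settings: these bits must stay set */
            *may_be_one  = (u32)(val >> 32);  /* allowed 1-settings: these bits may be set */
    }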
@@ -1635,7 +1630,7 @@ static u64 kvm_get_arch_capabilities(void)
         * If we're doing cache flushes (either "always" or "cond")
         * we will do one whenever the guest does a vmlaunch/vmresume.
         * If an outer hypervisor is doing the cache flush for us
-        * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
+        * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
         * capability to the guest too, and if EPT is disabled we're not
         * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
         * require a nested hypervisor to do a flush of its own.
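The check this comment documents amounts to roughly the following in kvm_get_arch_capabilities() (paraphrased rather than quoted verbatim):

    /* If KVM flushes L1D on VM-Entry, or an outer hypervisor does it for
     * us, the guest can safely skip its own flush on nested VM-Entry. */
    if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
            data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;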
@@ -1813,7 +1808,7 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
                unsigned long *bitmap = ranges[i].bitmap;
 
                if ((index >= start) && (index < end) && (flags & type)) {
-                       allowed = !!test_bit(index - start, bitmap);
+                       allowed = test_bit(index - start, bitmap);
                        break;
                }
        }
@@ -3706,8 +3701,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                }
                break;
-       case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
-       case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
+       case MSR_IA32_CR_PAT:
+               if (!kvm_pat_valid(data))
+                       return 1;
+
+               vcpu->arch.pat = data;
+               break;
+       case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
+       case MSR_MTRRdefType:
                return kvm_mtrr_set_msr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                return kvm_set_apic_base(vcpu, msr_info);
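The new MSR_IA32_CR_PAT case rejects writes that fail kvm_pat_valid(): every one of the eight byte-sized PAT entries must encode an architectural memory type (0, 1, 4, 5, 6 or 7). A hedged, loop-based sketch of that rule; the in-tree helper implements the same check branchlessly with bitmasks:

    static bool pat_valid_sketch(u64 pat)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    u8 type = (pat >> (i * 8)) & 0xff;

                    /* Valid types: UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7. */
                    if (type == 2 || type == 3 || type > 7)
                            return false;
            }
            return true;
    }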
@@ -4114,9 +4115,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
                break;
        }
+       case MSR_IA32_CR_PAT:
+               msr_info->data = vcpu->arch.pat;
+               break;
        case MSR_MTRRcap:
-       case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
-       case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
+       case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
+       case MSR_MTRRdefType:
                return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
        case 0xcd: /* fsb frequency */
                msr_info->data = 3;
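The explicit MTRR cases replace the old open-coded 0x200 ... 0x2ff ranges, so the MTRR handler no longer claims indices that aren't actually MTRRs. The macros expand into that same index space (values per asm/msr-index.h, quoted from memory, so verify against the header):

    /* Variable-range MTRRs: base/mask pairs starting at 0x200. */
    #define MTRRphysBase_MSR(reg)   (0x200 + 2 * (reg))
    #define MTRRphysMask_MSR(reg)   (0x200 + 2 * (reg) + 1)
    /* Fixed-range MTRRs end at MSR_MTRRfix4K_F8000 (0x26f); MSR_MTRRdefType is 0x2ff. */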
@@ -10768,6 +10772,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
                        break;
                }
+
+               /* Note, VM-Exits that go down the "slow" path are accounted below. */
+               ++vcpu->stat.exits;
        }
 
        /*