KVM: selftests: Test PMC virtualization with forced emulation
author Sean Christopherson <seanjc@google.com>
Tue, 9 Jan 2024 23:02:46 +0000 (15:02 -0800)
committer Sean Christopherson <seanjc@google.com>
Tue, 30 Jan 2024 23:29:44 +0000 (15:29 -0800)
Extend the PMC counters test to use forced emulation to verify that KVM
emulates counter events for instructions retired and branches retired.
Force emulation for only a subset of the measured code to test that KVM
does the right thing when mixing perf events with emulated events.
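
For context, the "forced emulation prefix" is a magic byte sequence that,
when KVM is loaded with its force_emulation_prefix module parameter
enabled, causes KVM's #UD handler to emulate the instruction that
immediately follows instead of injecting #UD into the guest.  Roughly, as
the selftests define it (shown here for reference, not part of this patch):

  /*
   * ud2 followed by the "kvm" signature; KVM recognizes the sequence
   * and emulates the next instruction.
   */
  #define KVM_FEP "ud2; .byte 'k', 'v', 'm';"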

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240109230250.424295-27-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/x86_64/pmu_counters_test.c

index 9e9dc4084c0d98b7746c6a9109e1280ac7e9264c..cb808ac827ba8b12ab1b56abd190c765fc8eb16e 100644
@@ -21,6 +21,7 @@
 
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
+static bool is_forced_emulation_enabled;
 
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                  void *guest_code,
@@ -34,6 +35,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
        vcpu_init_descriptor_tables(*vcpu);
 
        sync_global_to_guest(vm, kvm_pmu_version);
+       sync_global_to_guest(vm, is_forced_emulation_enabled);
 
        /*
         * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
@@ -138,37 +140,50 @@ sanity_checks:
 * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
  * start of the loop to force LLC references and misses, i.e. to allow testing
  * that those events actually count.
+ *
+ * If forced emulation is enabled (and specified), force emulation on a subset
+ * of the measured code to verify that KVM correctly emulates instructions and
+ * branches retired events in conjunction with hardware also counting said
+ * events.
  */
-#define GUEST_MEASURE_EVENT(_msr, _value, clflush)                             \
+#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)                                \
 do {                                                                           \
        __asm__ __volatile__("wrmsr\n\t"                                        \
                             clflush "\n\t"                                     \
                             "mfence\n\t"                                       \
                             "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \
-                            "loop .\n\t"                                       \
-                            "mov %%edi, %%ecx\n\t"                             \
-                            "xor %%eax, %%eax\n\t"                             \
-                            "xor %%edx, %%edx\n\t"                             \
+                            FEP "loop .\n\t"                                   \
+                            FEP "mov %%edi, %%ecx\n\t"                         \
+                            FEP "xor %%eax, %%eax\n\t"                         \
+                            FEP "xor %%edx, %%edx\n\t"                         \
                             "wrmsr\n\t"                                        \
                             :: "a"((uint32_t)_value), "d"(_value >> 32),       \
                                "c"(_msr), "D"(_msr)                            \
        );                                                                      \
 } while (0)
 
+#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP) \
+do {                                                                           \
+       wrmsr(_pmc_msr, 0);                                                     \
+                                                                               \
+       if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))                               \
+               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);   \
+       else if (this_cpu_has(X86_FEATURE_CLFLUSH))                             \
+               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);      \
+       else                                                                    \
+               GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);             \
+                                                                               \
+       guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);                 \
+} while (0)
+
 static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
                                    uint32_t pmc, uint32_t pmc_msr,
                                    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
 {
-       wrmsr(pmc_msr, 0);
-
-       if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))
-               GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflushopt 1f");
-       else if (this_cpu_has(X86_FEATURE_CLFLUSH))
-               GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "clflush 1f");
-       else
-               GUEST_MEASURE_EVENT(ctrl_msr, ctrl_msr_value, "nop");
+       GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
 
-       guest_assert_event_count(idx, event, pmc, pmc_msr);
+       if (is_forced_emulation_enabled)
+               GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
 #define X86_PMU_FEATURE_NULL                                           \
@@ -553,6 +568,7 @@ int main(int argc, char *argv[])
 
        kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
        kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+       is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();
 
        test_intel_counters();
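
For illustration only (not part of the patch): the real
kvm_is_forced_emulation_enabled() helper lives in the selftests library; a
hypothetical stand-in, assuming the kvm module exposes its
force_emulation_prefix parameter via sysfs, could look like this:

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-in for kvm_is_forced_emulation_enabled(). */
  static bool sketch_is_forced_emulation_enabled(void)
  {
          FILE *f = fopen("/sys/module/kvm/parameters/force_emulation_prefix", "r");
          int c;

          if (!f)
                  return false;   /* kvm not loaded or param not present */
          c = fgetc(f);
          fclose(f);
          /* bool params read back as Y/N, int params as a number. */
          return c == 'Y' || (c >= '1' && c <= '9');
  }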