// SPDX-License-Identifier: GPL-2.0
/*
 * Test for x86 KVM_SET_PMU_EVENT_FILTER.
 *
 * Copyright (C) 2022, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Verifies the expected behavior of allow lists and deny lists for
 * virtual PMU events.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/*
 * In lieu of copying perf_event.h into tools...
 */
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)

/* End of stuff taken from perf_event.h. */

/* Oddly, this isn't in perf_event.h. */
#define ARCH_PERFMON_BRANCHES_RETIRED 5

#define NUM_BRANCHES 42

/*
 * This is how the event selector and unit mask are stored in an AMD
 * core performance event-select register. Intel's format is similar,
 * but the event selector is only 8 bits.
 */
#define EVENT(select, umask) ((select & 0xf00UL) << 24 | (select & 0xff) | \
			      ((umask & 0xff) << 8))
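
/*
 * Decoding the macro above: event select bits [7:0] land in bits [7:0],
 * the unit mask lands in bits [15:8], and event select bits [11:8] land
 * in bits [35:32]. For example, EVENT(0x1c2, 0) evaluates to 0x1000000c2
 * (bit 32 set, low byte 0xc2).
 */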
40 * "Branch instructions retired", from the Intel SDM, volume 3,
41 * "Pre-defined Architectural Performance Events."
44 #define INTEL_BR_RETIRED EVENT(0xc4, 0)
47 * "Retired branch instructions", from Processor Programming Reference
48 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
49 * Preliminary Processor Programming Reference (PPR) for AMD Family
50 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
51 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
52 * B1 Processors Volume 1 of 2.
55 #define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
59 * "Retired instructions", from Processor Programming Reference
60 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
61 * Preliminary Processor Programming Reference (PPR) for AMD Family
62 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
63 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
64 * B1 Processors Volume 1 of 2.
66 * "Instructions retired", from the Intel SDM, volume 3,
67 * "Pre-defined Architectural Performance Events."
70 #define INST_RETIRED EVENT(0xc0, 0)

/*
 * This event list comprises Intel's eight architectural events plus
 * AMD's "retired branch instructions" for Zen[123] (and possibly
 * other AMD CPUs).
 */
static const uint64_t event_list[] = {

	uint64_t loads_stores;
	uint64_t branches_retired;
	uint64_t instructions_retired;

/*
 * If we encounter a #GP during the guest PMU sanity check, then the guest
 * PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
 */
static void guest_gp_handler(struct ex_regs *regs)

/*
 * Check that we can write a new value to the given MSR and read it back.
 * The caller should provide a non-empty set of bits that are safe to flip.
 *
 * Return on success. GUEST_SYNC(0) on error.
 */
static void check_msr(uint32_t msr, uint64_t bits_to_flip)
{
	uint64_t v = rdmsr(msr) ^ bits_to_flip;

static void run_and_measure_loop(uint32_t msr_base)
{
	const uint64_t branches_retired = rdmsr(msr_base + 0);
	const uint64_t insn_retired = rdmsr(msr_base + 1);
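
	/*
	 * The LOOP instruction below branches back to itself until ECX
	 * (preloaded with NUM_BRANCHES) reaches zero, so roughly
	 * NUM_BRANCHES branch instructions, and a similar number of total
	 * instructions, are expected to retire between the two counter
	 * reads.
	 */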
	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));

	pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
	pmc_results.instructions_retired = rdmsr(msr_base + 1) - insn_retired;
}

static void intel_guest_code(void)
{
	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
	check_msr(MSR_P6_EVNTSEL0, 0xffff);
	check_msr(MSR_IA32_PMC0, 0xffff);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
	wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
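
	/*
	 * Bits 0 and 1 of IA32_PERF_GLOBAL_CTRL enable general-purpose
	 * counters 0 and 1, i.e. the two counters programmed above.
	 */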
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);

	run_and_measure_loop(MSR_IA32_PMC0);

/*
 * To avoid needing a check for CPUID.80000001:ECX.PerfCtrExtCore[bit 23],
 * this code uses the always-available, legacy K7 PMU MSRs, which alias to
 * the first four of the six extended core PMU MSRs.
 */
static void amd_guest_code(void)
{
	check_msr(MSR_K7_EVNTSEL0, 0xffff);
	check_msr(MSR_K7_PERFCTR0, 0xffff);

	wrmsr(MSR_K7_EVNTSEL0, 0);
	wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
	wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);

	run_and_measure_loop(MSR_K7_PERFCTR0);

/*
 * Run the VM to the next GUEST_SYNC(value), and return the value passed
 * to the sync. Any other exit from the guest is fatal.
 */
static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	get_ucall(vcpu, &uc);
	TEST_ASSERT(uc.cmd == UCALL_SYNC,
		    "Received ucall other than UCALL_SYNC: %lu", uc.cmd);

static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)

	memset(&pmc_results, 0, sizeof(pmc_results));
	sync_global_to_guest(vcpu->vm, pmc_results);

	r = run_vcpu_to_sync(vcpu);
	TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);

	sync_global_from_guest(vcpu->vm, pmc_results);
}

/*
 * In a nested environment or if the vPMU is disabled, the guest PMU
 * might not work as architected (accessing the PMU MSRs may raise
 * #GP, or writes could simply be discarded). In those situations,
 * there is no point in running these tests. The guest code will perform
 * a sanity check and then GUEST_SYNC(success). In the case of failure,
 * the behavior of the guest on resumption is undefined.
 */
static bool sanity_check_pmu(struct kvm_vcpu *vcpu)

	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
	r = run_vcpu_to_sync(vcpu);
	vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);

static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
{
	struct kvm_pmu_event_filter *f;
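	/*
	 * struct kvm_pmu_event_filter ends in a flexible array of event
	 * entries, so the allocation must cover the header plus 'nevents'
	 * events.
	 */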
	int size = sizeof(*f) + nevents * sizeof(f->events[0]);

	TEST_ASSERT(f, "Out of memory");

	f->nevents = nevents;

static struct kvm_pmu_event_filter *
create_pmu_event_filter(const uint64_t event_list[], int nevents,
			uint32_t action, uint32_t flags)
{
	struct kvm_pmu_event_filter *f;

	f = alloc_pmu_event_filter(nevents);

	for (i = 0; i < nevents; i++)
		f->events[i] = event_list[i];

static struct kvm_pmu_event_filter *event_filter(uint32_t action)
{
	return create_pmu_event_filter(event_list,
				       ARRAY_SIZE(event_list),
				       action, 0);
}

/*
 * Remove the first occurrence of 'event' (if any) from the filter's
 * event list.
 */
static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
						 uint64_t event)

	for (i = 0; i < f->nevents; i++) {
		if (found)
			f->events[i - 1] = f->events[i];
		else
			found = f->events[i] == event;
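
/*
 * The guest's measurement loop retires a predictable but not perfectly
 * exact number of branches and instructions, so the assertions below only
 * check the counts for zero vs. non-zero; the branch count is merely
 * reported via pr_info() when it differs from NUM_BRANCHES.
 */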
#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
do { \
	uint64_t br = pmc_results.branches_retired; \
	uint64_t ir = pmc_results.instructions_retired; \
	if (br && br != NUM_BRANCHES) \
		pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \
			__func__, br, NUM_BRANCHES); \
	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \
		    __func__, br); \
	TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)", \
		    __func__, ir); \
} while (0)

#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS() \
do { \
	uint64_t br = pmc_results.branches_retired; \
	uint64_t ir = pmc_results.instructions_retired; \
	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)", \
		    __func__, br); \
	TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)", \
		    __func__, ir); \
} while (0)

static void test_without_filter(struct kvm_vcpu *vcpu)
{
	run_vcpu_and_sync_pmc_results(vcpu);

	ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_with_filter(struct kvm_vcpu *vcpu,
			     struct kvm_pmu_event_filter *f)
{
	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
	run_vcpu_and_sync_pmc_results(vcpu);
}
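
/*
 * The filter below denies event 0x1C2 while the guest programs 0xC2
 * ("retired branch instructions"). The two differ only in event select
 * bits [11:8], so the counters should keep counting, i.e. this checks
 * that KVM matches the full 12-bit AMD event select rather than just the
 * low byte.
 */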
static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
	uint64_t event = EVENT(0x1C2, 0);
	struct kvm_pmu_event_filter *f;

	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
	test_with_filter(vcpu, f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);

	test_with_filter(vcpu, f);

	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}

static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);

	test_with_filter(vcpu, f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);

	remove_event(f, INST_RETIRED);
	remove_event(f, INTEL_BR_RETIRED);
	remove_event(f, AMD_ZEN_BR_RETIRED);
	test_with_filter(vcpu, f);

	ASSERT_PMC_COUNTING_INSTRUCTIONS();
}

static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);

	remove_event(f, INST_RETIRED);
	remove_event(f, INTEL_BR_RETIRED);
	remove_event(f, AMD_ZEN_BR_RETIRED);
	test_with_filter(vcpu, f);

	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}

/*
 * Verify that setting KVM_PMU_CAP_DISABLE prevents the use of the PMU.
 *
 * Note that KVM_CAP_PMU_CAPABILITY must be enabled before any vCPUs are
 * created.
 */
static void test_pmu_config_disable(void (*guest_code)(void))
{
	struct kvm_vcpu *vcpu;

	r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
	if (!(r & KVM_PMU_CAP_DISABLE))
		return;

	vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);

	vcpu = vm_vcpu_add(vm, 0, guest_code);
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	TEST_ASSERT(!sanity_check_pmu(vcpu),
		    "Guest should not be able to use disabled PMU.");

/*
 * On Intel, check for a non-zero PMU version, at least one general-purpose
 * counter per logical processor, and support for counting the number of
 * branch instructions retired.
 */
static bool use_intel_pmu(void)
{
	return host_cpu_is_intel &&
	       kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
	       kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
	       kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
}

static bool is_zen1(uint32_t family, uint32_t model)
{
	return family == 0x17 && model <= 0x0f;
}

static bool is_zen2(uint32_t family, uint32_t model)
{
	return family == 0x17 && model >= 0x30 && model <= 0x3f;
}

static bool is_zen3(uint32_t family, uint32_t model)
{
	return family == 0x19 && model <= 0x0f;
}

/*
 * Determining AMD support for a PMU event requires consulting the AMD
 * PPR for the CPU or reference material derived therefrom. The AMD
 * test code herein has been verified to work on Zen1, Zen2, and Zen3.
 *
 * Feel free to add more AMD CPUs that are documented to support event
 * select 0xc2 umask 0 as "retired branch instructions."
 */
static bool use_amd_pmu(void)
{
	uint32_t family = kvm_cpu_family();
	uint32_t model = kvm_cpu_model();

	return host_cpu_is_amd &&
	       (is_zen1(family, model) ||
		is_zen2(family, model) ||
		is_zen3(family, model));
}
459 * "MEM_INST_RETIRED.ALL_LOADS", "MEM_INST_RETIRED.ALL_STORES", and
460 * "MEM_INST_RETIRED.ANY" from https://perfmon-events.intel.com/
461 * supported on Intel Xeon processors:
462 * - Sapphire Rapids, Ice Lake, Cascade Lake, Skylake.
464 #define MEM_INST_RETIRED 0xD0
465 #define MEM_INST_RETIRED_LOAD EVENT(MEM_INST_RETIRED, 0x81)
466 #define MEM_INST_RETIRED_STORE EVENT(MEM_INST_RETIRED, 0x82)
467 #define MEM_INST_RETIRED_LOAD_STORE EVENT(MEM_INST_RETIRED, 0x83)
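
/*
 * Note that the three unit masks above (0x81, 0x82, 0x83) share bit 7 and
 * differ only in bits 0 and 1; the masked-events test cases below key off
 * of exactly that structure when choosing their mask/match values.
 */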

static bool supports_event_mem_inst_retired(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	if (x86_family(eax) == 0x6) {
		switch (x86_model(eax)) {
		/* Sapphire Rapids */
491 * "LS Dispatch", from Processor Programming Reference
492 * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
493 * Preliminary Processor Programming Reference (PPR) for AMD Family
494 * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
495 * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
496 * B1 Processors Volume 1 of 2.
498 #define LS_DISPATCH 0x29
499 #define LS_DISPATCH_LOAD EVENT(LS_DISPATCH, BIT(0))
500 #define LS_DISPATCH_STORE EVENT(LS_DISPATCH, BIT(1))
501 #define LS_DISPATCH_LOAD_STORE EVENT(LS_DISPATCH, BIT(2))
503 #define INCLUDE_MASKED_ENTRY(event_select, mask, match) \
504 KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, false)
505 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match) \
506 KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
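
/*
 * Masked-entry semantics, roughly: a guest event matches an entry when
 * its event select equals the entry's event select and
 * (unit mask & mask) == match. An event is allowed only if it matches at
 * least one include entry and no exclude entry.
 */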

static void masked_events_guest_test(uint32_t msr_base)
{
	/*
	 * The actual values of the counters don't determine the outcome of
	 * the test; all that matters is whether they are zero or non-zero.
	 */
	const uint64_t loads = rdmsr(msr_base + 0);
	const uint64_t stores = rdmsr(msr_base + 1);
	const uint64_t loads_stores = rdmsr(msr_base + 2);
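
	/*
	 * Touch 'val' in memory so that each of the programmed counters
	 * (loads, stores, and loads + stores) observes a small, non-zero
	 * number of events between the two sets of counter reads.
	 */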
	__asm__ __volatile__("movl $0, %[v];"
			     "movl %[v], %%eax;"
			     "incl %[v];"
			     : [v]"+m"(val) :: "eax");

	pmc_results.loads = rdmsr(msr_base + 0) - loads;
	pmc_results.stores = rdmsr(msr_base + 1) - stores;
	pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
}

static void intel_masked_events_guest_code(void)

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	wrmsr(MSR_P6_EVNTSEL0 + 0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD);
	wrmsr(MSR_P6_EVNTSEL0 + 1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_STORE);
	wrmsr(MSR_P6_EVNTSEL0 + 2, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | MEM_INST_RETIRED_LOAD_STORE);

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);

	masked_events_guest_test(MSR_IA32_PMC0);

static void amd_masked_events_guest_code(void)

	wrmsr(MSR_K7_EVNTSEL0, 0);
	wrmsr(MSR_K7_EVNTSEL1, 0);
	wrmsr(MSR_K7_EVNTSEL2, 0);

	wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD);
	wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_STORE);
	wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
	      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);

	masked_events_guest_test(MSR_K7_PERFCTR0);

static void run_masked_events_test(struct kvm_vcpu *vcpu,
				   const uint64_t masked_events[],
				   const int nmasked_events)
{
	struct kvm_pmu_event_filter *f;

	f = create_pmu_event_filter(masked_events, nmasked_events,
				    KVM_PMU_EVENT_ALLOW,
				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	test_with_filter(vcpu, f);

/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS 300
#define MAX_TEST_EVENTS 10

#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)

struct masked_events_test {
	uint64_t intel_events[MAX_TEST_EVENTS];
	uint64_t intel_event_end;
	uint64_t amd_events[MAX_TEST_EVENTS];
	uint64_t amd_event_end;
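	/*
	 * The *_event_end fields above double as canaries: a test case that
	 * initializes more than MAX_TEST_EVENTS entries spills into them,
	 * which the assert()s in run_masked_events_tests() will catch.
	 */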

/*
 * These are the test cases for the masked events tests.
 *
 * For each test, the guest enables 3 PMU counters (loads, stores,
 * loads + stores). The filter is then set in KVM with the masked events
 * provided. The host then verifies that the counters the filter should
 * allow are counting and that the filtered counters read zero.
 */
const struct masked_events_test test_cases[] = {
	{
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x81),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
		},
		.msg = "Only allow loads.",
		.flags = ALLOW_LOADS,
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
		},
		.msg = "Only allow stores.",
		.flags = ALLOW_STORES,
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(2)),
		},
		.msg = "Only allow loads + stores.",
		.flags = ALLOW_LOADS_STORES,
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
			EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x83),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, ~(BIT(0) | BIT(1)), 0),
		},
		.msg = "Only allow loads and stores.",
		.flags = ALLOW_LOADS | ALLOW_STORES,
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
			EXCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFF, 0x82),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
			EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(1)),
		},
		.msg = "Only allow loads and loads + stores.",
		.flags = ALLOW_LOADS | ALLOW_LOADS_STORES
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0xFE, 0x82),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
			EXCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xFF, BIT(0)),
		},
		.msg = "Only allow stores and loads + stores.",
		.flags = ALLOW_STORES | ALLOW_LOADS_STORES
	}, {
		.intel_events = {
			INCLUDE_MASKED_ENTRY(MEM_INST_RETIRED, 0x7C, 0),
		},
		.amd_events = {
			INCLUDE_MASKED_ENTRY(LS_DISPATCH, 0xF8, 0),
		},
		.msg = "Only allow loads, stores, and loads + stores.",
		.flags = ALLOW_LOADS | ALLOW_STORES | ALLOW_LOADS_STORES
	},
};

static int append_test_events(const struct masked_events_test *test,
			      uint64_t *events, int nevents)
{
	const uint64_t *evts;

	evts = use_intel_pmu() ? test->intel_events : test->amd_events;
	for (i = 0; i < MAX_TEST_EVENTS; i++) {
		if (evts[i] == 0)
			break;

		events[nevents + i] = evts[i];

static bool bool_eq(bool a, bool b)
{
	return a == b;
}

static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
				    int nevents)
{
	int ntests = ARRAY_SIZE(test_cases);

	for (i = 0; i < ntests; i++) {
		const struct masked_events_test *test = &test_cases[i];

		/* Do any test case events overflow MAX_TEST_EVENTS? */
		assert(test->intel_event_end == 0);
		assert(test->amd_event_end == 0);

		n = append_test_events(test, events, nevents);

		run_masked_events_test(vcpu, events, n);

		TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
			    bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
			    bool_eq(pmc_results.loads_stores,
				    test->flags & ALLOW_LOADS_STORES),
			    "%s loads: %lu, stores: %lu, loads + stores: %lu",
			    test->msg, pmc_results.loads, pmc_results.stores,
			    pmc_results.loads_stores);

static void add_dummy_events(uint64_t *events, int nevents)

	for (i = 0; i < nevents; i++) {
		int event_select = i % 0xFF;
		bool exclude = ((i % 4) == 0);

		if (event_select == MEM_INST_RETIRED ||
		    event_select == LS_DISPATCH)
			event_select++;

		events[i] = KVM_PMU_ENCODE_MASKED_ENTRY(event_select, 0,
							0, exclude);

static void test_masked_events(struct kvm_vcpu *vcpu)
{
	int nevents = MAX_FILTER_EVENTS - MAX_TEST_EVENTS;
	uint64_t events[MAX_FILTER_EVENTS];

	/* Run the test cases against a sparse PMU event filter. */
	run_masked_events_tests(vcpu, events, 0);

	/* Run the test cases against a dense PMU event filter. */
	add_dummy_events(events, MAX_FILTER_EVENTS);
	run_masked_events_tests(vcpu, events, nevents);
}

static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
			   int nevents, uint32_t flags)
{
	struct kvm_pmu_event_filter *f;

	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);

static void test_filter_ioctl(struct kvm_vcpu *vcpu)

	/*
	 * Unfortunately, when flags == 0, setting invalid bits in the event
	 * data (anything other than the event select and unit mask) is
	 * expected to succeed.
	 */
	r = run_filter_test(vcpu, &e, 1, 0);
	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");

	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");

	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
}

int main(int argc, char *argv[])
{
	void (*guest_code)(void);
	struct kvm_vcpu *vcpu, *vcpu2 = NULL;
	struct kvm_vm *vm;

	TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_MASKED_EVENTS));

	TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
	guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	TEST_REQUIRE(sanity_check_pmu(vcpu));

	if (use_amd_pmu())
		test_amd_deny_list(vcpu);

	test_without_filter(vcpu);
	test_member_deny_list(vcpu);
	test_member_allow_list(vcpu);
	test_not_member_deny_list(vcpu);
	test_not_member_allow_list(vcpu);

	if (use_intel_pmu() &&
	    supports_event_mem_inst_retired() &&
	    kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) >= 3)
		vcpu2 = vm_vcpu_add(vm, 2, intel_masked_events_guest_code);
	else if (use_amd_pmu())
		vcpu2 = vm_vcpu_add(vm, 2, amd_masked_events_guest_code);

	if (vcpu2)
		test_masked_events(vcpu2);
	test_filter_ioctl(vcpu);

	test_pmu_config_disable(guest_code);