From 97d0d1655ea82ac8a54241d5457e6944fbff954c Mon Sep 17 00:00:00 2001
From: Colton Lewis
Date: Wed, 18 Sep 2024 20:53:14 +0000
Subject: KVM: selftests: Fix typos in x86's PMU counter test's macro variable use

Fix goofs in PMU counter test's assertion macros where the macros
unintentionally reference variables in the parent scope.  The code "works"
as-is purely by accident, as all users define a variable with the correct
name (and usage).

Fixes: cd34fd8c758e ("KVM: selftests: Test PMC virtualization with forced emulation")
Signed-off-by: Colton Lewis
Reviewed-by: Mingwei Zhang
Link: https://lore.kernel.org/r/20240918205319.3517569-2-coltonlewis@google.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86/pmu_counters_test.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 698cb36989db..0e305e43a93b 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -174,7 +174,7 @@ do {										\
 
 #define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
 do {										\
-	wrmsr(pmc_msr, 0);							\
+	wrmsr(_pmc_msr, 0);							\
 										\
 	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
 		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP);	\
@@ -331,9 +331,9 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,		\
 		       expect_gp ? "#GP" : "no fault", msr, vector)		\
 
 #define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected)			\
-	__GUEST_ASSERT(val == expected_val,					\
+	__GUEST_ASSERT(val == expected,						\
 		       "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx",	\
-		       msr, expected_val, val);
+		       msr, expected, val);
 
 static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
 			     uint64_t expected_val)
-- cgit v1.2.3

From 7803339fa929387bbc66479532afbaf8cbebb41b Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 27 Nov 2024 15:56:27 -0800
Subject: KVM: selftests: Use data load to trigger LLC references/misses in Intel PMU

In the PMU counters test, add a data load in the measured loop and target
the data with CLFLUSH{OPT} in order to (try to) guarantee the loop
generates LLC misses and fills.  Per the SDM, some hardware prefetchers
are allowed to omit the relevant PMU events: the event count may include
speculation and cache line fills due to the first-level cache hardware
prefetcher, but may exclude cache line fills due to other hardware
prefetchers.  Emerald Rapids (and possibly Sapphire Rapids) appears to
have gained an instruction prefetcher that bypasses the event counts,
e.g. the test will consistently fail on EMR CPUs, but then pass with
seemingly benign changes to the code.

Generate a data load as a last-ditch effort to preserve the (minimal)
test coverage for LLC references and misses.
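
Purely for illustration, a minimal standalone sketch of the flush-then-load
pattern (hypothetical names; the actual change drives the load from the
GUEST_MEASURE_EVENT asm blob and reuses the handy kvm_pmu_version global as
the flush/load target):

  /*
   * Flush a data line, then explicitly load it back so that each loop
   * iteration generates an LLC reference and miss via a *data* access,
   * which (hopefully) cannot be elided by an instruction prefetcher.
   */
  static unsigned int flush_target;

  static void generate_llc_miss(void)
  {
  	unsigned int val;

  	__asm__ volatile("clflush %[m]\n\t"
  			 "mfence\n\t"
  			 "mov %[m], %[v]\n\t"
  			 : [v] "=r" (val)
  			 : [m] "m" (flush_target));
  }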
Cc: Maxim Levitsky
Link: https://lore.kernel.org/r/20241127235627.4049619-1-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86/pmu_counters_test.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 0e305e43a93b..accd7ecd3e5f 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -17,7 +17,7 @@
  * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
  * 1 LOOP.
  */
-#define NUM_INSNS_PER_LOOP	3
+#define NUM_INSNS_PER_LOOP	4
 
 /*
  * Number of "extra" instructions that will be counted, i.e. the number of
@@ -162,13 +162,14 @@ do {										\
 	     "1:\n\t"								\
 	     clflush "\n\t"							\
 	     "mfence\n\t"							\
+	     "mov %[m], %%eax\n\t"						\
 	     FEP "loop 1b\n\t"							\
 	     FEP "mov %%edi, %%ecx\n\t"						\
 	     FEP "xor %%eax, %%eax\n\t"						\
 	     FEP "xor %%edx, %%edx\n\t"						\
 	     "wrmsr\n\t"							\
 	     :: "a"((uint32_t)_value), "d"(_value >> 32),			\
-	        "c"(_msr), "D"(_msr)						\
+	        "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version)			\
 	     );									\
 } while (0)
 
@@ -177,9 +178,9 @@ do {										\
 	wrmsr(_pmc_msr, 0);							\
 										\
 	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
-		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP);	\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt %[m]", FEP);\
 	else if (this_cpu_has(X86_FEATURE_CLFLUSH))				\
-		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP);	\
+		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush %[m]", FEP);	\
 	else									\
 		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
 
-- cgit v1.2.3

From 933178ddf73a031bfa9efa67452504ea3be43b18 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 17 Jan 2025 15:41:59 -0800
Subject: KVM: selftests: Make Intel arch events globally available in PMU counters test

Wrap the PMU counters test's array of Intel architectural events in a
helper function so that the events can be queried in multiple locations.
Add a comment to explain the need for a wrapper.

No functional change intended.

Link: https://lore.kernel.org/r/20250117234204.2600624-2-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 .../testing/selftests/kvm/x86/pmu_counters_test.c  | 84 +++++++++++++---------
 1 file changed, 49 insertions(+), 35 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index accd7ecd3e5f..fe7d72fc8a75 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -33,6 +33,53 @@
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
 
+#define X86_PMU_FEATURE_NULL						\
+({									\
+	struct kvm_x86_pmu_feature feature = {};			\
+									\
+	feature;							\
+})
+
+static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
+{
+	return !(*(u64 *)&event);
+}
+
+struct kvm_intel_pmu_event {
+	struct kvm_x86_pmu_feature gp_event;
+	struct kvm_x86_pmu_feature fixed_event;
+};
+
+/*
+ * Wrap the array to appease the compiler, as the macros used to construct each
+ * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the
+ * compiler often thinks the feature definitions aren't compile-time constants.
+ */
+static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
+{
+	const struct kvm_intel_pmu_event __intel_event_to_feature[] = {
+		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
+		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
+		/*
+		 * Note, the fixed counter for reference cycles is NOT the same as the
+		 * general purpose architectural event. The fixed counter explicitly
+		 * counts at the same frequency as the TSC, whereas the GP event counts
+		 * at a fixed, but uarch specific, frequency. Bundle them here for
+		 * simplicity.
+		 */
+		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
+		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
+		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+	};
+
+	kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
+
+	return __intel_event_to_feature[idx];
+}
+
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 						  void *guest_code,
 						  uint8_t pmu_version,
@@ -197,41 +244,8 @@ static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature even
 	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
-#define X86_PMU_FEATURE_NULL						\
-({									\
-	struct kvm_x86_pmu_feature feature = {};			\
-									\
-	feature;							\
-})
-
-static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
-{
-	return !(*(u64 *)&event);
-}
-
 static void guest_test_arch_event(uint8_t idx)
 {
-	const struct {
-		struct kvm_x86_pmu_feature gp_event;
-		struct kvm_x86_pmu_feature fixed_event;
-	} intel_event_to_feature[] = {
-		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
-		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
-		/*
-		 * Note, the fixed counter for reference cycles is NOT the same
-		 * as the general purpose architectural event. The fixed counter
-		 * explicitly counts at the same frequency as the TSC, whereas
-		 * the GP event counts at a fixed, but uarch specific, frequency.
-		 * Bundle them here for simplicity.
-		 */
-		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
-		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
-		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
-	};
-
 	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
 	uint32_t pmu_version = guest_get_pmu_version();
 	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
@@ -249,7 +263,7 @@ static void guest_test_arch_event(uint8_t idx)
 	else
 		base_pmc_msr = MSR_IA32_PERFCTR0;
 
-	gp_event = intel_event_to_feature[idx].gp_event;
+	gp_event = intel_event_to_feature(idx).gp_event;
 	GUEST_ASSERT_EQ(idx, gp_event.f.bit);
 
 	GUEST_ASSERT(nr_gp_counters);
@@ -270,7 +284,7 @@ static void guest_test_arch_event(uint8_t idx)
 	if (!guest_has_perf_global_ctrl)
 		return;
 
-	fixed_event = intel_event_to_feature[idx].fixed_event;
+	fixed_event = intel_event_to_feature(idx).fixed_event;
 	if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
 		return;
 
-- cgit v1.2.3

From 8752e2b4a2b770932c3f99dcf626cb326b02d518 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 17 Jan 2025 15:42:00 -0800
Subject: KVM: selftests: Only validate counts for hardware-supported arch events

In the Intel PMU counters test, only validate the counts for
architectural events that are supported in hardware.  If an arch event
isn't supported, the event selector may enable a completely different
event, and thus the logic for the expected count is bogus.

This fixes test failures on pre-Icelake systems due to the encoding for
the architectural Top-Down Slots event corresponding to something else
(at least on the Skylake family of CPUs).

Note, validation relies on *hardware* support, not KVM support and not
guest support.  Architectural events are all about enumerating the event
selector encoding; lack of enumeration for an architectural event doesn't
mean the event itself is unsupported, i.e. the event should still count
as expected even if KVM and/or guest CPUID doesn't enumerate the event as
being "architectural".

Note #2, it's desirable to _program_ the architectural event encoding
even if hardware doesn't support the event.  The count can't be validated
when the event is fully enabled, but KVM should still let the guest
program the event selector, and the PMC shouldn't count if the event is
disabled.

Fixes: 4f1bd6b16074 ("KVM: selftests: Test Intel PMU architectural events on gp counters")
Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-lkp/202501141009.30c629b4-lkp@intel.com
Debugged-by: Dapeng Mi
Link: https://lore.kernel.org/r/20250117234204.2600624-3-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 .../testing/selftests/kvm/x86/pmu_counters_test.c  | 25 ++++++++++++++++------
 1 file changed, 18 insertions(+), 7 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index fe7d72fc8a75..8159615ad492 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -29,6 +29,8 @@
 /* Total number of instructions retired within the measured section. */
 #define NUM_INSNS_RETIRED	(NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
 
+/* Track which architectural events are supported by hardware. */
+static uint32_t hardware_pmu_arch_events;
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
 
@@ -89,6 +91,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 
 	vm = vm_create_with_one_vcpu(vcpu, guest_code);
 	sync_global_to_guest(vm, kvm_pmu_version);
+	sync_global_to_guest(vm, hardware_pmu_arch_events);
 
 	/*
 	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
@@ -152,7 +155,7 @@ static void guest_assert_event_count(uint8_t idx,
 	uint64_t count;
 
 	count = _rdpmc(pmc);
-	if (!this_pmu_has(event))
+	if (!(hardware_pmu_arch_events & BIT(idx)))
 		goto sanity_checks;
 
 	switch (idx) {
@@ -560,7 +563,7 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 
 static void test_intel_counters(void)
 {
-	uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint8_t nr_arch_events = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
 	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
 	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
 	uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
@@ -582,18 +585,26 @@ static void test_intel_counters(void)
 
 	/*
 	 * Detect the existence of events that aren't supported by selftests.
-	 * This will (obviously) fail any time the kernel adds support for a
-	 * new event, but it's worth paying that price to keep the test fresh.
+	 * This will (obviously) fail any time hardware adds support for a new
+	 * event, but it's worth paying that price to keep the test fresh.
 	 */
 	TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
 		    "New architectural event(s) detected; please update this test (length = %u, mask = %x)",
-		    nr_arch_events, kvm_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
+		    nr_arch_events, this_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
 
 	/*
-	 * Force iterating over known arch events regardless of whether or not
-	 * KVM/hardware supports a given event.
+	 * Iterate over known arch events irrespective of KVM/hardware support
+	 * to verify that KVM doesn't reject programming of events just because
+	 * the *architectural* encoding is unsupported.  Track which events are
+	 * supported in hardware; the guest side will validate supported events
+	 * count correctly, even if *enumeration* of the event is unsupported
+	 * by KVM and/or isn't exposed to the guest.
 	 */
 	nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);
+	for (i = 0; i < nr_arch_events; i++) {
+		if (this_pmu_has(intel_event_to_feature(i).gp_event))
+			hardware_pmu_arch_events |= BIT(i);
+	}
 
 	for (v = 0; v <= max_pmu_version; v++) {
 		for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
-- cgit v1.2.3

From e327630e2a0c2540dc97913bacb60b5b88151994 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 17 Jan 2025 15:42:01 -0800
Subject: KVM: selftests: Remove dead code in Intel PMU counters test

Drop the local "nr_arch_events" in the Intel PMU counters test as the test
asserts that "nr_arch_events <= NR_INTEL_ARCH_EVENTS", and then sets
nr_arch_events to the max of the two.  I.e. nr_arch_events is guaranteed
to be NR_INTEL_ARCH_EVENTS for the meat of the test, just use
NR_INTEL_ARCH_EVENTS directly.

No functional change intended.
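
Illustrative only (standard assert() standing in for TEST_ASSERT(), and a
made-up value for the constant), the invariant that makes the local dead:

  #include <assert.h>

  #define NR_INTEL_ARCH_EVENTS 8	/* stand-in value for this sketch */

  static unsigned int resolve_nr_arch_events(unsigned int nr_arch_events)
  {
  	assert(nr_arch_events <= NR_INTEL_ARCH_EVENTS);
  	/* max(x, C) with x <= C always yields C, hence the dead local. */
  	return nr_arch_events > NR_INTEL_ARCH_EVENTS ?
  	       nr_arch_events : NR_INTEL_ARCH_EVENTS;
  }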
Link: https://lore.kernel.org/r/20250117234204.2600624-4-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86/pmu_counters_test.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 8159615ad492..5d6a5b9c17b3 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -563,7 +563,6 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 
 static void test_intel_counters(void)
 {
-	uint8_t nr_arch_events = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
 	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
 	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
 	uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
@@ -588,9 +587,10 @@ static void test_intel_counters(void)
 	 * This will (obviously) fail any time hardware adds support for a new
 	 * event, but it's worth paying that price to keep the test fresh.
 	 */
-	TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
+	TEST_ASSERT(this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH) <= NR_INTEL_ARCH_EVENTS,
 		    "New architectural event(s) detected; please update this test (length = %u, mask = %x)",
-		    nr_arch_events, this_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
+		    this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH),
+		    this_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
 
 	/*
 	 * Iterate over known arch events irrespective of KVM/hardware support
@@ -600,8 +600,7 @@ static void test_intel_counters(void)
 	 * count correctly, even if *enumeration* of the event is unsupported
 	 * by KVM and/or isn't exposed to the guest.
 	 */
-	nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);
-	for (i = 0; i < nr_arch_events; i++) {
+	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
 		if (this_pmu_has(intel_event_to_feature(i).gp_event))
 			hardware_pmu_arch_events |= BIT(i);
 	}
@@ -620,8 +619,8 @@ static void test_intel_counters(void)
 		 * vector length.
 		 */
 		if (v == pmu_version) {
-			for (k = 1; k < (BIT(nr_arch_events) - 1); k++)
-				test_arch_events(v, perf_caps[i], nr_arch_events, k);
+			for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++)
+				test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k);
 		}
 		/*
 		 * Test single bits for all PMU version and lengths up
@@ -630,11 +629,11 @@ static void test_intel_counters(void)
 		 * host length). Explicitly test a mask of '0' and all
 		 * ones i.e. all events being available and unavailable.
 		 */
-		for (j = 0; j <= nr_arch_events + 1; j++) {
+		for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
 			test_arch_events(v, perf_caps[i], j, 0);
 			test_arch_events(v, perf_caps[i], j, 0xff);
 
-			for (k = 0; k < nr_arch_events; k++)
+			for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
 				test_arch_events(v, perf_caps[i], j, BIT(k));
 		}
 
-- cgit v1.2.3

From 0e6714735c012251f3cbfbe26b96222c7d5e45a7 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 17 Jan 2025 15:42:02 -0800
Subject: KVM: selftests: Drop the "feature event" param from guest test helpers

Now that validation of event counts is tied to hardware support for an
event, and not to guest support for an event, drop the unused "event"
parameter from the various helpers.

No functional change intended.
Link: https://lore.kernel.org/r/20250117234204.2600624-5-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86/pmu_counters_test.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 5d6a5b9c17b3..ea1485a08c78 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -148,9 +148,7 @@ static uint8_t guest_get_pmu_version(void)
  * Sanity check that in all cases, the event doesn't count when it's disabled,
  * and that KVM correctly emulates the write of an arbitrary value.
  */
-static void guest_assert_event_count(uint8_t idx,
-				     struct kvm_x86_pmu_feature event,
-				     uint32_t pmc, uint32_t pmc_msr)
+static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr)
 {
 	uint64_t count;
 
@@ -223,7 +221,7 @@ do {										\
 	     );									\
 } while (0)
 
-#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
+#define GUEST_TEST_EVENT(_idx, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)		\
 do {										\
 	wrmsr(_pmc_msr, 0);							\
 										\
@@ -234,17 +232,16 @@ do {										\
 	else									\
 		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
 										\
-	guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);		\
+	guest_assert_event_count(_idx, _pmc, _pmc_msr);				\
 } while (0)
 
-static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
-				    uint32_t pmc, uint32_t pmc_msr,
+static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr,
 				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
 {
-	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
+	GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
 
 	if (is_forced_emulation_enabled)
-		GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
+		GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
 }
 
 static void guest_test_arch_event(uint8_t idx)
@@ -280,7 +277,7 @@ static void guest_test_arch_event(uint8_t idx)
 		if (guest_has_perf_global_ctrl)
 			wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));
 
-		__guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
+		__guest_test_arch_event(idx, i, base_pmc_msr + i,
 					MSR_P6_EVNTSEL0 + i, eventsel);
 	}
 
@@ -295,7 +292,7 @@ static void guest_test_arch_event(uint8_t idx)
 		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL,
 		      FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
 
-		__guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED,
+		__guest_test_arch_event(idx, i | INTEL_RDPMC_FIXED,
 					MSR_CORE_PERF_FIXED_CTR0 + i,
 					MSR_CORE_PERF_GLOBAL_CTRL,
 					FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
-- cgit v1.2.3

From 54108e73344480c3e5f3799129970009f52c59f4 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 17 Jan 2025 15:42:03 -0800
Subject: KVM: selftests: Print out the actual Top-Down Slots count on failure

Print out the expected vs. actual count of the Top-Down Slots event on
failure in the Intel PMU counters test.  GUEST_ASSERT() only expands
constants/macros, i.e. only prints the value of the expected count, which
makes it difficult to debug and triage failures.
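
As a simplified sketch of why (hypothetical macro and report_failure()
names, not the selftests' real definitions): an assert that only
stringifies its expression can report the expression text, but not what a
variable held at runtime, whereas a printf-style variant can report both:

  #define MY_ASSERT(cond)						\
  do {									\
  	if (!(cond))							\
  		report_failure("Failed: %s", #cond);			\
  } while (0)

  #define __MY_ASSERT(cond, fmt, ...)					\
  do {									\
  	if (!(cond))							\
  		report_failure("Failed: %s, " fmt, #cond, ##__VA_ARGS__); \
  } while (0)

  /* Reports only the expression text, not the measured count: */
  MY_ASSERT(count >= NUM_INSNS_RETIRED);

  /* Reports the runtime values as well: */
  __MY_ASSERT(count >= NUM_INSNS_RETIRED,
  	    "expected >= %u, got %lu", NUM_INSNS_RETIRED, count);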
Link: https://lore.kernel.org/r/20250117234204.2600624-6-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/x86/pmu_counters_test.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'tools/testing/selftests/kvm/x86/pmu_counters_test.c')

diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index ea1485a08c78..8aaaf25b6111 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -174,7 +174,9 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr
 		GUEST_ASSERT_NE(count, 0);
 		break;
 	case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
-		GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
+		__GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
+			       "Expected top-down slots >= %u, got count = %lu",
+			       NUM_INSNS_RETIRED, count);
 		break;
 	default:
 		break;
-- cgit v1.2.3