| field | value | date |
|---|---|---|
| author | Kan Liang <kan.liang@linux.intel.com> | 2025-12-05 16:16:43 -0800 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2025-12-17 13:31:05 +0100 |
| commit | 42457a7fb6cacca83be4deaf202ac3e45830daf2 (patch) | |
| tree | 3216c0a657d386407280f60e84662f9e6a203c05 /kernel | |
| parent | 4593b4b6e218a0f21afbacc8124cf469d2d04094 (diff) | |
perf: Add APIs to load/put guest mediated PMU context
Add exported APIs to load/put a guest mediated PMU context. KVM will
load the guest PMU shortly before VM-Enter, and put the guest PMU shortly
after VM-Exit.
On the perf side of things, schedule out all exclude_guest events when the
guest context is loaded, and schedule them back in when the guest context
is put. I.e. yield the hardware PMU resources to the guest, by way of KVM.
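
For reference, "exclude_guest" here means any event whose perf_event_attr sets the exclude_guest bit. A minimal userspace sketch of such an event (illustrative only, error handling omitted):

```c
/*
 * Open a CPU-cycles counter that perf will transparently schedule out
 * while a guest's mediated PMU context is loaded, because it opts out
 * of counting guest mode via attr.exclude_guest.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_host_only_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_guest = 1;		/* yield the counter while a guest runs */

	/* pid == 0, cpu == -1: count this thread on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
```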
Note, perf is only responsible for managing host context. KVM is
responsible for loading/storing guest state to/from hardware.
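
A sketch of the intended call sites, assuming hypothetical KVM hook names (the actual KVM integration lands in separate patches of the series):

```c
/*
 * Illustrative only: kvm_mediated_pmu_load()/kvm_mediated_pmu_put()
 * are hypothetical names, not part of this patch. The ordering is the
 * point: perf yields the hardware PMU before KVM installs guest state,
 * and KVM saves guest state before perf reclaims the PMU -- all with
 * IRQs disabled.
 */
static void kvm_mediated_pmu_load(struct kvm_vcpu *vcpu)
{
	/* Host exclude_guest events are scheduled out here. */
	perf_load_guest_context();

	/* KVM's responsibility: load guest PMU state into hardware. */
}

static void kvm_mediated_pmu_put(struct kvm_vcpu *vcpu)
{
	/* KVM's responsibility: save guest PMU state out of hardware. */

	/* Host exclude_guest events are scheduled back in here. */
	perf_put_guest_context();
}
```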
[sean: shuffle patches around, write changelog]
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://patch.msgid.link/20251206001720.468579-8-seanjc@google.com
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/events/core.c | 61 |

1 file changed, 61 insertions, 0 deletions
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6781d39f3158..bbb81a4a3196 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -470,10 +470,19 @@ static cpumask_var_t perf_online_pkg_mask;
 static cpumask_var_t perf_online_sys_mask;
 static struct kmem_cache *perf_event_cache;
 
+#ifdef CONFIG_PERF_GUEST_MEDIATED_PMU
+static DEFINE_PER_CPU(bool, guest_ctx_loaded);
+
+static __always_inline bool is_guest_mediated_pmu_loaded(void)
+{
+	return __this_cpu_read(guest_ctx_loaded);
+}
+#else
 static __always_inline bool is_guest_mediated_pmu_loaded(void)
 {
 	return false;
 }
+#endif
 
 /*
  * perf event paranoia level:
@@ -6384,6 +6393,58 @@ void perf_release_mediated_pmu(void)
 	atomic_dec(&nr_mediated_pmu_vms);
 }
 EXPORT_SYMBOL_GPL(perf_release_mediated_pmu);
+
+/* When loading a guest's mediated PMU, schedule out all exclude_guest events. */
+void perf_load_guest_context(void)
+{
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+
+	lockdep_assert_irqs_disabled();
+
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+
+	if (WARN_ON_ONCE(__this_cpu_read(guest_ctx_loaded)))
+		return;
+
+	perf_ctx_disable(&cpuctx->ctx, EVENT_GUEST);
+	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_GUEST);
+	if (cpuctx->task_ctx) {
+		perf_ctx_disable(cpuctx->task_ctx, EVENT_GUEST);
+		task_ctx_sched_out(cpuctx->task_ctx, NULL, EVENT_GUEST);
+	}
+
+	perf_ctx_enable(&cpuctx->ctx, EVENT_GUEST);
+	if (cpuctx->task_ctx)
+		perf_ctx_enable(cpuctx->task_ctx, EVENT_GUEST);
+
+	__this_cpu_write(guest_ctx_loaded, true);
+}
+EXPORT_SYMBOL_GPL(perf_load_guest_context);
+
+void perf_put_guest_context(void)
+{
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
+
+	lockdep_assert_irqs_disabled();
+
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+
+	if (WARN_ON_ONCE(!__this_cpu_read(guest_ctx_loaded)))
+		return;
+
+	perf_ctx_disable(&cpuctx->ctx, EVENT_GUEST);
+	if (cpuctx->task_ctx)
+		perf_ctx_disable(cpuctx->task_ctx, EVENT_GUEST);
+
+	perf_event_sched_in(cpuctx, cpuctx->task_ctx, NULL, EVENT_GUEST);
+
+	if (cpuctx->task_ctx)
+		perf_ctx_enable(cpuctx->task_ctx, EVENT_GUEST);
+	perf_ctx_enable(&cpuctx->ctx, EVENT_GUEST);
+
+	__this_cpu_write(guest_ctx_loaded, false);
+}
+EXPORT_SYMBOL_GPL(perf_put_guest_context);
 #else
 static int mediated_pmu_account_event(struct perf_event *event) { return 0; }
 static void mediated_pmu_unaccount_event(struct perf_event *event) {}
```
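
Note the symmetry of the two helpers: both bracket their work with perf_ctx_disable()/perf_ctx_enable() on the CPU context and, if present, the task context, and the per-CPU guest_ctx_loaded flag makes unbalanced or nested calls WARN and bail. A minimal sketch of the calling convention these APIs require, with an illustrative caller name (in practice KVM invokes them around VM-Enter/VM-Exit):

```c
/*
 * Hypothetical caller, for illustration only. Both APIs assert that
 * IRQs are disabled (lockdep_assert_irqs_disabled()) and must be
 * strictly paired on the same CPU.
 */
static void demo_run_guest_with_mediated_pmu(void)
{
	unsigned long flags;

	local_irq_save(flags);

	perf_load_guest_context();	/* host exclude_guest events go quiet */

	/* ... enter the guest; KVM context-switches guest PMU state ... */

	perf_put_guest_context();	/* host exclude_guest events resume */

	local_irq_restore(flags);
}
```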
