| author | Juergen Gross <jgross@suse.com> | 2026-01-05 12:05:20 +0100 |
|---|---|---|
| committer | Borislav Petkov (AMD) <bp@alien8.de> | 2026-01-13 14:57:45 +0100 |
| commit | b0b449e6fec4cd182bd4384f7eb9002596079f68 (patch) | |
| tree | ce058ab4c1c42353515b63fe76b017f09113d920 /arch/x86/kernel | |
| parent | 392afe83165a54080ec48e50d45049bd5aaad332 (diff) | |
x86/pvlocks: Move paravirt spinlock functions into own header
Instead of having the pv spinlock function definitions in paravirt.h,
move them into the new header paravirt-spinlock.h.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://patch.msgid.link/20260105110520.21356-22-jgross@suse.com
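
The new header itself lives under arch/x86/include/asm and is therefore outside the diffstat below, which is limited to arch/x86/kernel. As a reading aid, here is a minimal sketch of what the header presumably declares, inferred from the pv_ops_lock definition and its users in this diff; the guard name, include set and exact field types are assumptions, not taken from the patch:

```c
/*
 * Hypothetical sketch of the new paravirt-spinlock.h (not shown in this
 * diffstat); shapes inferred from the pv_ops_lock uses in the diff below.
 */
#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
#define _ASM_X86_PARAVIRT_SPINLOCK_H

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/paravirt_types.h>	/* struct paravirt_callee_save */

struct qspinlock;

struct pv_lock_ops {
	/* Slow path taken when the fast-path cmpxchg on the lock fails. */
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	/* Hypervisor hooks used to block and wake waiting vCPUs. */
	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
};

extern struct pv_lock_ops pv_ops_lock;

DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
void __init native_pv_lock_init(void);

#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
```

Pulling the lock ops out of the shared pv_ops template is also what enables the Makefile change below: paravirt-spinlocks.o is now built for every CONFIG_PARAVIRT configuration, with the CONFIG_PARAVIRT_SPINLOCKS-only parts guarded by #ifdef inside the file.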
Diffstat (limited to 'arch/x86/kernel')
| -rw-r--r-- | arch/x86/kernel/Makefile | 2 |
| -rw-r--r-- | arch/x86/kernel/kvm.c | 12 |
| -rw-r--r-- | arch/x86/kernel/paravirt-spinlocks.c | 26 |
| -rw-r--r-- | arch/x86/kernel/paravirt.c | 21 |
4 files changed, 31 insertions, 30 deletions
```diff
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bc184dd38d99..e9aeeeafad17 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -126,7 +126,7 @@
 obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o

 obj-$(CONFIG_KVM_GUEST)		+= kvm.o kvmclock.o
 obj-$(CONFIG_PARAVIRT)		+= paravirt.o
-obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 21b4de55f823..de550b12d9ab 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -829,8 +829,10 @@ static void __init kvm_guest_init(void)
 		has_steal_clock = 1;
 		static_call_update(pv_steal_clock, kvm_steal_clock);

-		pv_ops.lock.vcpu_is_preempted =
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+		pv_ops_lock.vcpu_is_preempted =
 			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+#endif
 	}

 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -1126,11 +1128,11 @@ void __init kvm_spinlock_init(void)
 	pr_info("PV spinlocks enabled\n");

 	__pv_init_lock_hash();
-	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_ops.lock.queued_spin_unlock =
+	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops_lock.queued_spin_unlock =
 		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_ops.lock.wait = kvm_wait;
-	pv_ops.lock.kick = kvm_kick_cpu;
+	pv_ops_lock.wait = kvm_wait;
+	pv_ops_lock.kick = kvm_kick_cpu;

 	/*
 	 * When PV spinlock is enabled which is preferred over
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 9e1ea99ad9df..95452444868f 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -3,12 +3,22 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
+#include <linux/static_call.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>

-#include <asm/paravirt.h>
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+#ifdef CONFIG_SMP
+void __init native_pv_lock_init(void)
+{
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS

 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);
@@ -17,7 +27,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

 bool pv_is_native_spin_unlock(void)
 {
-	return pv_ops.lock.queued_spin_unlock.func ==
+	return pv_ops_lock.queued_spin_unlock.func ==
 		__raw_callee_save___native_queued_spin_unlock;
 }

@@ -29,7 +39,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

 bool pv_is_native_vcpu_is_preempted(void)
 {
-	return pv_ops.lock.vcpu_is_preempted.func ==
+	return pv_ops_lock.vcpu_is_preempted.func ==
 		__raw_callee_save___native_vcpu_is_preempted;
 }

@@ -41,3 +51,13 @@ void __init paravirt_set_cap(void)
 	if (!pv_is_native_vcpu_is_preempted())
 		setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
 }
+
+struct pv_lock_ops pv_ops_lock = {
+	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+	.wait = paravirt_nop,
+	.kick = paravirt_nop,
+	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+};
+EXPORT_SYMBOL(pv_ops_lock);
+#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5dfbd3f55792..a6ed52cae003 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -57,14 +57,6 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif

-DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-void __init native_pv_lock_init(void)
-{
-	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_enable(&virt_spin_lock_key);
-}
-
 static noinstr void pv_native_safe_halt(void)
 {
 	native_safe_halt();
@@ -221,19 +213,6 @@ struct paravirt_patch_template pv_ops = {

 	.mmu.set_fixmap = native_set_fixmap,
 #endif /* CONFIG_PARAVIRT_XXL */
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-	/* Lock ops. */
-#ifdef CONFIG_SMP
-	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-	.lock.queued_spin_unlock =
-		PV_CALLEE_SAVE(__native_queued_spin_unlock),
-	.lock.wait = paravirt_nop,
-	.lock.kick = paravirt_nop,
-	.lock.vcpu_is_preempted =
-		PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-#endif
 };

 #ifdef CONFIG_PARAVIRT_XXL
```
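
For context on the moved static key: virt_spin_lock_key is flipped on by native_pv_lock_init() when running under a hypervisor, and the qspinlock fast path tests it to decide whether to bypass queueing. A sketch of the typical consumer, modeled on the existing virt_spin_lock() in asm/qspinlock.h, which this patch does not touch:

```c
/*
 * Sketch of how virt_spin_lock_key is consumed (modeled on the existing
 * asm/qspinlock.h code; not part of this patch). On a hypervisor without
 * PV spinlock support, fair queued locks suffer badly from lock-holder
 * preemption, so fall back to a simple test-and-set lock instead.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;	/* bare metal: take the normal queued path */

	do {
		/* Wait until the lock looks free... */
		while (atomic_read(&lock->val))
			cpu_relax();
		/* ...then try to grab it; retry if someone beat us to it. */
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
```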
