| author | Juergen Gross <jgross@suse.com> | 2026-01-05 12:05:20 +0100 |
|---|---|---|
| committer | Borislav Petkov (AMD) <bp@alien8.de> | 2026-01-13 14:57:45 +0100 |
| commit | b0b449e6fec4cd182bd4384f7eb9002596079f68 (patch) | |
| tree | ce058ab4c1c42353515b63fe76b017f09113d920 /arch/x86/include | |
| parent | 392afe83165a54080ec48e50d45049bd5aaad332 (diff) | |
x86/pvlocks: Move paravirt spinlock functions into own header
Instead of having the pv spinlock function definitions in paravirt.h,
move them into the new header paravirt-spinlock.h.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://patch.msgid.link/20260105110520.21356-22-jgross@suse.com
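As a rough orientation before the diff: after this patch the pv spinlock declarations are provided by the new <asm/paravirt-spinlock.h>, which <asm/qspinlock.h> pulls in when CONFIG_PARAVIRT is enabled, instead of by <asm/paravirt.h>. Below is a minimal sketch of the consumer-side view; it is illustrative only and not part of the patch, and example_unlock() is a made-up helper name.

```c
/* Illustrative sketch only -- not part of this patch. */
#include <asm/qspinlock.h>	/* includes <asm/paravirt-spinlock.h> under CONFIG_PARAVIRT */

/* example_unlock() is a hypothetical name used purely for illustration. */
static void example_unlock(struct qspinlock *lock)
{
	/*
	 * With CONFIG_PARAVIRT_SPINLOCKS=y this resolves to the
	 * queued_spin_unlock() wrapper from the new header, which calls
	 * pv_queued_spin_unlock(); otherwise the generic implementation
	 * from <asm-generic/qspinlock.h> is used.
	 */
	queued_spin_unlock(lock);
}
```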
Diffstat (limited to 'arch/x86/include')
| -rw-r--r-- | arch/x86/include/asm/paravirt-base.h | 6 |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/paravirt-spinlock.h | 145 |
| -rw-r--r-- | arch/x86/include/asm/paravirt.h | 61 |
| -rw-r--r-- | arch/x86/include/asm/paravirt_types.h | 17 |
| -rw-r--r-- | arch/x86/include/asm/qspinlock.h | 87 |
5 files changed, 156 insertions, 160 deletions
diff --git a/arch/x86/include/asm/paravirt-base.h b/arch/x86/include/asm/paravirt-base.h
index 3827ea20de18..982a0b93bc76 100644
--- a/arch/x86/include/asm/paravirt-base.h
+++ b/arch/x86/include/asm/paravirt-base.h
@@ -26,4 +26,10 @@ u64 _paravirt_ident_64(u64);
 #endif
 
 #define paravirt_nop ((void *)nop_func)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void paravirt_set_cap(void);
+#else
+static inline void paravirt_set_cap(void) { }
+#endif
+
 #endif /* _ASM_X86_PARAVIRT_BASE_H */
diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
new file mode 100644
index 000000000000..a5011ef3a6cc
--- /dev/null
+++ b/arch/x86/include/asm/paravirt-spinlock.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
+#define _ASM_X86_PARAVIRT_SPINLOCK_H
+
+#include <asm/paravirt_types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#endif
+
+struct qspinlock;
+
+struct pv_lock_ops {
+        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+        struct paravirt_callee_save queued_spin_unlock;
+
+        void (*wait)(u8 *ptr, u8 val);
+        void (*kick)(int cpu);
+
+        struct paravirt_callee_save vcpu_is_preempted;
+} __no_randomize_layout;
+
+extern struct pv_lock_ops pv_ops_lock;
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+                                                          u32 val)
+{
+        PVOP_VCALL2(pv_ops_lock, queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+        PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
+                          "movb $0, (%%" _ASM_ARG1 ");",
+                          ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+        return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
+                                "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+                                ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+        smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+        pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+        kcsan_release();
+        pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+        return pv_vcpu_is_preempted(cpu);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+        PVOP_VCALL2(pv_ops_lock, wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+        PVOP_VCALL1(pv_ops_lock, kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+/*
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+        int val;
+
+        if (!static_branch_likely(&virt_spin_lock_key))
+                return false;
+
+        /*
+         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+         * back to a Test-and-Set spinlock, because fair locks have
+         * horrible lock 'holder' preemption issues.
+         */
+
+ __retry:
+        val = atomic_read(&lock->val);
+
+        if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+                cpu_relax();
+                goto __retry;
+        }
+
+        return true;
+}
+
+#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ec274d13bae0..b21072af731d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -19,15 +19,6 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -522,46 +513,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 {
         pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
-                                                          u32 val)
-{
-        PVOP_VCALL2(pv_ops, lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
-        PVOP_ALT_VCALLEE1(pv_ops, lock.queued_spin_unlock, lock,
-                          "movb $0, (%%" _ASM_ARG1 ");",
-                          ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
-        PVOP_VCALL2(pv_ops, lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
-        PVOP_VCALL1(pv_ops, lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
-        return PVOP_ALT_CALLEE1(bool, pv_ops, lock.vcpu_is_preempted, cpu,
-                                "xor %%" _ASM_AX ", %%" _ASM_AX ";",
-                                ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
-#ifdef CONFIG_PARAVIRT_XXL
 static __always_inline unsigned long arch_local_save_flags(void)
 {
         return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",
@@ -588,8 +540,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #endif
 
-void native_pv_lock_init(void) __init;
-
 #else /* __ASSEMBLER__ */
 
 #ifdef CONFIG_X86_64
@@ -613,12 +563,6 @@ void native_pv_lock_init(void) __init;
 #endif /* __ASSEMBLER__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLER__
@@ -634,10 +578,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 }
 #endif
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b36d425d099b..7ccd41628d36 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -184,22 +184,6 @@ struct pv_mmu_ops {
 #endif
 } __no_randomize_layout;
 
-#ifdef CONFIG_SMP
-#include <asm/spinlock_types.h>
-#endif
-
-struct qspinlock;
-
-struct pv_lock_ops {
-        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
-        struct paravirt_callee_save queued_spin_unlock;
-
-        void (*wait)(u8 *ptr, u8 val);
-        void (*kick)(int cpu);
-
-        struct paravirt_callee_save vcpu_is_preempted;
-} __no_randomize_layout;
-
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
@@ -207,7 +191,6 @@ struct paravirt_patch_template {
         struct pv_cpu_ops cpu;
         struct pv_irq_ops irq;
         struct pv_mmu_ops mmu;
-        struct pv_lock_ops lock;
 } __no_randomize_layout;
 
 extern struct paravirt_patch_template pv_ops;
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 68da67df304d..25a1919542d9 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -7,6 +7,9 @@
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
 #include <asm/rmwcc.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt-spinlock.h>
+#endif
 
 #define _Q_PENDING_LOOPS (1 << 9)
 
@@ -27,90 +30,10 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
         return val;
 }
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-extern bool nopvspin;
-
-#define queued_spin_unlock queued_spin_unlock
-/**
- * queued_spin_unlock - release a queued spinlock
- * @lock : Pointer to queued spinlock structure
- *
- * A smp_store_release() on the least-significant byte.
- */
-static inline void native_queued_spin_unlock(struct qspinlock *lock)
-{
-        smp_store_release(&lock->locked, 0);
-}
-
-static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
-{
-        pv_queued_spin_lock_slowpath(lock, val);
-}
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-        kcsan_release();
-        pv_queued_spin_unlock(lock);
-}
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(long cpu)
-{
-        return pv_vcpu_is_preempted(cpu);
-}
+#ifndef CONFIG_PARAVIRT
+static inline void native_pv_lock_init(void) { }
 #endif
 
-#ifdef CONFIG_PARAVIRT
-/*
- * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
- *
- * Native (and PV wanting native due to vCPU pinning) should keep this key
- * disabled. Native does not touch the key.
- *
- * When in a guest then native_pv_lock_init() enables the key first and
- * KVM/XEN might conditionally disable it later in the boot process again.
- */
-DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-/*
- * Shortcut for the queued_spin_lock_slowpath() function that allows
- * virt to hijack it.
- *
- * Returns:
- *   true - lock has been negotiated, all done;
- *   false - queued_spin_lock_slowpath() will do its thing.
- */
-#define virt_spin_lock virt_spin_lock
-static inline bool virt_spin_lock(struct qspinlock *lock)
-{
-        int val;
-
-        if (!static_branch_likely(&virt_spin_lock_key))
-                return false;
-
-        /*
-         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
-         * back to a Test-and-Set spinlock, because fair locks have
-         * horrible lock 'holder' preemption issues.
-         */
-
- __retry:
-        val = atomic_read(&lock->val);
-
-        if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
-                cpu_relax();
-                goto __retry;
-        }
-
-        return true;
-}
-
-#endif /* CONFIG_PARAVIRT */
-
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
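The hook structure itself, struct pv_lock_ops with its single instance pv_ops_lock, now lives entirely in the new header. As a hedged sketch, not taken from this patch, of how a hypervisor guest could install its callbacks through it: the hv_wait(), hv_kick() and hv_spinlock_init() names below are hypothetical placeholders; KVM and Xen have their own real equivalents.

```c
/*
 * Hedged sketch, not part of this patch: a guest wiring its lock
 * callbacks into pv_ops_lock from <asm/paravirt-spinlock.h>.
 * hv_wait(), hv_kick() and hv_spinlock_init() are hypothetical names.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/paravirt-spinlock.h>

static void hv_wait(u8 *ptr, u8 val)
{
	/* Ask the hypervisor to block this vCPU until *ptr != val. */
}

static void hv_kick(int cpu)
{
	/* Ask the hypervisor to wake the vCPU parked on the lock. */
}

void __init hv_spinlock_init(void)
{
	if (nopvspin)		/* respect the "nopvspin" command line option */
		return;

	__pv_init_lock_hash();
	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops_lock.wait = hv_wait;
	pv_ops_lock.kick = hv_kick;
}
```

Note that the PVOP_ALT_* wrappers in the new header patch the unlock and vcpu_is_preempted calls back to the native movb/xor sequences when X86_FEATURE_PVUNLOCK or X86_FEATURE_VCPUPREEMPT is not set, so bare-metal boots do not pay for the indirection.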
