From d3febf16dee28a74b01ba43195ee4965edb6208f Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Fri, 19 Dec 2025 16:40:08 +0100
Subject: locking/local_lock: Support Clang's context analysis

Add support for Clang's context analysis for local_lock_t and
local_trylock_t.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-20-elver@google.com
---
 include/linux/local_lock.h          | 51 +++++++++++++++-----------
 include/linux/local_lock_internal.h | 71 +++++++++++++++++++++++++++++--------
 2 files changed, 87 insertions(+), 35 deletions(-)

(limited to 'include')

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..99c06e499375 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)		__local_lock(this_cpu_ptr(lock))
+#define local_lock(lock)		__local_lock(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)		__local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock)		__local_lock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags)				\
-	__local_lock_irqsave(this_cpu_ptr(lock), flags)
+	__local_lock_irqsave(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)		__local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock)		__local_unlock(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)		__local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock)		__local_unlock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags)			\
-	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+	__local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)		__local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock)		__local_trylock(__this_cpu_local_lock(lock))
 
 #define local_lock_is_locked(lock)	__local_lock_is_locked(lock)
 
@@ -81,27 +81,36 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags)			\
-	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
-	     local_lock(_T),
-	     local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
-	     local_lock_irq(_T),
-	     local_unlock_irq(_T))
+	__local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+		    local_lock(_T->lock),
+		    local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+		    local_lock_irq(_T->lock),
+		    local_unlock_irq(_T->lock))
 DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    local_lock_irqsave(_T->lock, _T->flags),
 		    local_unlock_irqrestore(_T->lock, _T->flags),
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock)				\
-	__local_lock_nested_bh(this_cpu_ptr(_lock))
+	__local_lock_nested_bh(__this_cpu_local_lock(_lock))
 
 #define local_unlock_nested_bh(_lock)				\
-	__local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
-	     local_lock_nested_bh(_T),
-	     local_unlock_nested_bh(_T))
+	__local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+		    local_lock_nested_bh(_T->lock),
+		    local_unlock_nested_bh(_T->lock))
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
 
 #endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 1a1ea1232add..e8c4803d8db4 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -10,21 +10,23 @@
 
 #ifndef CONFIG_PREEMPT_RT
 
-typedef struct {
+context_lock_struct(local_lock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
 
 /* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
 	u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
@@ -84,9 +86,14 @@ do {							\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_PERCPU);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)
 
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock)				\
+do {								\
+	__local_lock_init((local_lock_t *)lock);		\
+	__assume_ctx_lock(lock);				\
+} while (0)
 
 #define __spinlock_nested_bh_init(lock)			\
 do {							\
@@ -97,6 +104,7 @@ do {							\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_NORMAL);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)
 
 #define __local_lock_acquire(lock)			\
@@ -119,22 +127,25 @@ do {							\
 do {								\
 	preempt_disable();					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)
 
 #define __local_lock_irq(lock)					\
 do {								\
 	local_irq_disable();					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)
 
 #define __local_lock_irqsave(lock, flags)			\
 do {								\
 	local_irq_save(flags);					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)
 
 #define __local_trylock(lock)					\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		local_trylock_t *__tl;				\
 								\
 		preempt_disable();				\
@@ -148,10 +159,10 @@ do {							\
 					(local_lock_t *)__tl);	\
 		}						\
 		!!__tl;						\
-	})
+	}))
 
 #define __local_trylock_irqsave(lock, flags)			\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		local_trylock_t *__tl;				\
 								\
 		local_irq_save(flags);				\
@@ -165,7 +176,7 @@ do {							\
 					(local_lock_t *)__tl);	\
 		}						\
 		!!__tl;						\
-	})
+	}))
 
 /* preemption or migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(lock)	READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -188,18 +199,21 @@ do {							\
 
 #define __local_unlock(lock)					\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	preempt_enable();					\
 } while (0)
 
 #define __local_unlock_irq(lock)				\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	local_irq_enable();					\
 } while (0)
 
 #define __local_unlock_irqrestore(lock, flags)			\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	local_irq_restore(flags);				\
 } while (0)
@@ -208,13 +222,19 @@ do {							\
 do {								\
 	lockdep_assert_in_softirq();				\
 	local_lock_acquire((lock));				\
+	__acquire(lock);					\
 } while (0)
 
 #define __local_unlock_nested_bh(lock)				\
-	local_lock_release((lock))
+	do {							\
+		__release(lock);				\
+		local_lock_release((lock));			\
+	} while (0)
 
 #else /* !CONFIG_PREEMPT_RT */
 
+#include
+
 /*
  * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
  * critical section while staying preemptible.
@@ -269,7 +289,7 @@ do {							\
 } while (0)
 
 #define __local_trylock(lock)					\
-	({							\
+	__try_acquire_ctx_lock(lock, context_unsafe(({		\
 		int __locked;					\
 								\
 		if (in_nmi() | in_hardirq()) {			\
@@ -281,17 +301,40 @@ do {							\
 			migrate_enable();			\
 		}						\
 		__locked;					\
-	})
+	})))
 
 #define __local_trylock_irqsave(lock, flags)			\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		typecheck(unsigned long, flags);		\
 		flags = 0;					\
 		__local_trylock(lock);				\
-	})
+	}))
 
 /* migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(__lock)				\
 	(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
 
 #endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base variable,
+ * and hide the fact we actually pass the per-CPU instance to lock/unlock
+ * functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
--
cgit v1.2.3
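
Usage sketch (not part of the patch): with context analysis enabled for a
translation unit, the annotated local_lock()/local_unlock() macros above
acquire and release the context of the per-CPU base variable, so an
unbalanced lock/unlock pair can be flagged at compile time. The struct,
variable and function names below are hypothetical; the sketch only relies
on local_lock_t, DEFINE_PER_CPU(), INIT_LOCAL_LOCK(), this_cpu_inc(),
local_lock() and local_unlock() from <linux/local_lock.h> and
<linux/percpu.h>.

	/* Hypothetical per-CPU data guarded by a local_lock_t. */
	struct event_counters {
		local_lock_t	lock;
		u64		events;
	};
	static DEFINE_PER_CPU(struct event_counters, event_counters) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void count_event(void)
	{
		local_lock(&event_counters.lock);
		this_cpu_inc(event_counters.events);
		/* Omitting the unlock should now produce an analysis warning. */
		local_unlock(&event_counters.lock);
	}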
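
The guard classes converted from DEFINE_GUARD() to DEFINE_LOCK_GUARD_1()
above keep working with the usual scoped-cleanup helpers, and the
DECLARE_LOCK_GUARD_1_ATTRS() lines attach acquire/release attributes to the
guard constructor and destructor so scoped guards take part in the analysis
as well. A minimal sketch, reusing the hypothetical event_counters structure
from the previous example and guard() from <linux/cleanup.h>:

	static void count_event_irqsave(void)
	{
		/* Acquires the lock context; released when the guard goes out of scope. */
		guard(local_lock_irqsave)(&event_counters.lock);
		this_cpu_inc(event_counters.events);
	}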