summaryrefslogtreecommitdiff
path: root/include/linux/spinlock_rt.h
diff options
context:
space:
mode:
authorMarco Elver <elver@google.com>2025-12-19 16:39:57 +0100
committerPeter Zijlstra <peterz@infradead.org>2026-01-05 16:43:28 +0100
commitf16a802d402d735a55731f8c94952b3bbb5ddfe8 (patch)
treec2c18a0e9a7d0794f07ade7416844c55f581f6b5 /include/linux/spinlock_rt.h
parent7c451541743c6c2ef1afc425191f18a23e311019 (diff)
locking/rwlock, spinlock: Support Clang's context analysis
Add support for Clang's context analysis for raw_spinlock_t, spinlock_t, and rwlock. This wholesale conversion is required because all three of them are interdependent. To avoid warnings in constructors, the initialization functions mark a lock as acquired when initialized before guarded variables. The test verifies that common patterns do not generate false positives. Signed-off-by: Marco Elver <elver@google.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://patch.msgid.link/20251219154418.3592607-9-elver@google.com
Diffstat (limited to 'include/linux/spinlock_rt.h')
-rw-r--r--include/linux/spinlock_rt.h21
1 file changed, 13 insertions, 8 deletions
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f6499c37157d..6bab73ee1384 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,6 +20,7 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
do { \
rt_mutex_base_init(&(slock)->lock); \
__rt_spin_lock_init(slock, name, key, percpu); \
+ __assume_ctx_lock(slock); \
} while (0)
#define _spin_lock_init(slock, percpu) \
@@ -40,6 +41,7 @@ extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -82,6 +84,7 @@ static __always_inline void spin_lock(spinlock_t *lock)
__spin_lock_irqsave_nested(lock, flags, subclass)
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock)
{
/* Investigate: Drop bh when blocking ? */
local_bh_disable();
@@ -89,6 +92,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
}
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -101,23 +105,27 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
local_bh_enable();
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
@@ -132,14 +140,11 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
__cond_lock(lock, rt_spin_trylock(lock))
#define spin_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = spin_trylock(lock); \
- __locked; \
-})
+ __cond_lock(lock, ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ rt_spin_trylock(lock); \
+ }))
#define spin_is_contended(lock) (((void)(lock), 0))