author     Linus Torvalds <torvalds@linux-foundation.org>  2026-02-10 12:28:44 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2026-02-10 12:28:44 -0800
commit     0923fd0419a1a2c8846e15deacac11b619e996d9 (patch)
tree       7cc5fecc1680f5881f1d4183be400b51c81e6943 /include/linux
parent     4d84667627c4ff70826b349c449bbaf63b9af4e5 (diff)
parent     7a562d5d2396c9c78fbbced7ae81bcfcfa0fde3f (diff)
Merge tag 'locking-core-2026-02-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Lock debugging:

   - Implement compiler-driven static analysis locking context checking,
     using the upcoming Clang 22 compiler's context analysis features
     (Marco Elver)

     We removed Sparse context analysis support, because prior to removal
     even a defconfig kernel produced 1,700+ context tracking Sparse
     warnings, the overwhelming majority of which are false positives. On
     an allmodconfig kernel the number of false positive context tracking
     Sparse warnings grows to over 5,200...

     On the plus side of the balance, the number of actual locking bugs
     found by Sparse context analysis is also rather ... sparse: I found
     only 3 such commits in the last 3 years.

     So the rate of false positives and the maintenance overhead are
     rather high, and there appears to be no active policy in place to
     achieve a zero-warnings baseline and move the annotations & fixes to
     developers who introduce new code.

     Clang context analysis is more complete and more aggressive in
     trying to find bugs, at least in principle. Plus it has a different
     model of enabling it: it's enabled subsystem by subsystem, which
     results in zero warnings on all relevant kernel builds (as far as
     our testing managed to cover it).

     Which allowed us to enable it by default, similar to other compiler
     warnings, with the expectation that there are no warnings going
     forward. This enforces a zero-warnings baseline on clang-22+ builds
     (which are still limited in distribution, admittedly).

     Hopefully the Clang approach can lead to a more maintainable
     zero-warnings status quo and policy, with more and more subsystems
     and drivers enabling the feature.

     Context tracking can be enabled for all kernel code via
     WARN_CONTEXT_ANALYSIS_ALL=y (default disabled), but this will
     generate a lot of false positives.

     (Having said that, Sparse support could still be added back, if
     anyone is interested - the removal patch is still relatively
     straightforward to revert at this stage.)

  Rust integration updates: (Alice Ryhl, Fujita Tomonori, Boqun Feng)

   - Add support for Atomic<i8/i16/bool> and replace most Rust native
     AtomicBool usages with Atomic<bool>

   - Clean up LockClassKey and improve its documentation

   - Add missing Send and Sync trait implementations for SetOnce

   - Make ARef Unpin as it is supposed to be

   - Add __rust_helper to a few Rust helpers as a preparation for helper
     LTO

   - Inline various lock related functions to avoid additional function
     calls

  WW mutexes:

   - Extend ww_mutex tests and other test-ww_mutex updates (John Stultz)

  Misc fixes and cleanups:

   - rcu: Mark lockdep_assert_rcu_helper() __always_inline (Arnd Bergmann)

   - locking/local_lock: Include more missing headers (Peter Zijlstra)

   - seqlock: fix scoped_seqlock_read kernel-doc (Randy Dunlap)

   - rust: sync: Replace `kernel::c_str!` with C-Strings (Tamir Duberstein)"

* tag 'locking-core-2026-02-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (90 commits)
  locking/rwlock: Fix write_trylock_irqsave() with CONFIG_INLINE_WRITE_TRYLOCK
  rcu: Mark lockdep_assert_rcu_helper() __always_inline
  compiler-context-analysis: Remove __assume_ctx_lock from initializers
  tomoyo: Use scoped init guard
  crypto: Use scoped init guard
  kcov: Use scoped init guard
  compiler-context-analysis: Introduce scoped init guards
  cleanup: Make __DEFINE_LOCK_GUARD handle commas in initializers
  seqlock: fix scoped_seqlock_read kernel-doc
  tools: Update context analysis macros in compiler_types.h
  rust: sync: Replace `kernel::c_str!` with C-Strings
  rust: sync: Inline various lock related methods
  rust: helpers: Move #define __rust_helper out of atomic.c
  rust: wait: Add __rust_helper to helpers
  rust: time: Add __rust_helper to helpers
  rust: task: Add __rust_helper to helpers
  rust: sync: Add __rust_helper to helpers
  rust: refcount: Add __rust_helper to helpers
  rust: rcu: Add __rust_helper to helpers
  rust: processor: Add __rust_helper to helpers
  ...
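To give a flavour of what the new checking does (a minimal sketch, not taken from this merge; the struct and function names below are illustrative), a __guarded_by() member may only be accessed while the named lock's context is active, and a clang-22+ build with context analysis enabled warns about the unlocked read in counter_peek():

#include <linux/spinlock.h>

struct counter_state {
	spinlock_t lock;
	long counter __guarded_by(&lock);	/* writes require &lock held exclusively */
};

static void counter_inc(struct counter_state *s)
{
	spin_lock(&s->lock);	/* annotated __acquires(): enters the context */
	s->counter++;		/* ok: &s->lock is held */
	spin_unlock(&s->lock);	/* annotated __releases(): leaves the context */
}

static long counter_peek(struct counter_state *s)
{
	return s->counter;	/* warning: reading 'counter' requires holding &s->lock */
}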
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h    18
-rw-r--r--  include/linux/atomic/atomic-instrumented.h     26
-rw-r--r--  include/linux/atomic/atomic-long.h             10
-rw-r--r--  include/linux/bit_spinlock.h                   24
-rw-r--r--  include/linux/cleanup.h                        58
-rw-r--r--  include/linux/compiler-context-analysis.h     436
-rw-r--r--  include/linux/compiler.h                        2
-rw-r--r--  include/linux/compiler_types.h                 18
-rw-r--r--  include/linux/console.h                         4
-rw-r--r--  include/linux/debugfs.h                        12
-rw-r--r--  include/linux/kref.h                            2
-rw-r--r--  include/linux/list_bl.h                         2
-rw-r--r--  include/linux/local_lock.h                     59
-rw-r--r--  include/linux/local_lock_internal.h            72
-rw-r--r--  include/linux/lockdep.h                        12
-rw-r--r--  include/linux/lockref.h                         4
-rw-r--r--  include/linux/mm.h                             33
-rw-r--r--  include/linux/mutex.h                          40
-rw-r--r--  include/linux/mutex_types.h                     4
-rw-r--r--  include/linux/rcupdate.h                       90
-rw-r--r--  include/linux/refcount.h                        6
-rw-r--r--  include/linux/rhashtable.h                     16
-rw-r--r--  include/linux/rwlock.h                         19
-rw-r--r--  include/linux/rwlock_api_smp.h                 43
-rw-r--r--  include/linux/rwlock_rt.h                      43
-rw-r--r--  include/linux/rwlock_types.h                   10
-rw-r--r--  include/linux/rwsem.h                          78
-rw-r--r--  include/linux/sched.h                           6
-rw-r--r--  include/linux/sched/signal.h                   16
-rw-r--r--  include/linux/sched/task.h                      6
-rw-r--r--  include/linux/sched/wake_q.h                    3
-rw-r--r--  include/linux/seqlock.h                        57
-rw-r--r--  include/linux/seqlock_types.h                   5
-rw-r--r--  include/linux/spinlock.h                      119
-rw-r--r--  include/linux/spinlock_api_smp.h               34
-rw-r--r--  include/linux/spinlock_api_up.h               112
-rw-r--r--  include/linux/spinlock_rt.h                    36
-rw-r--r--  include/linux/spinlock_types.h                 10
-rw-r--r--  include/linux/spinlock_types_raw.h              5
-rw-r--r--  include/linux/srcu.h                           73
-rw-r--r--  include/linux/srcutiny.h                        6
-rw-r--r--  include/linux/srcutree.h                       10
-rw-r--r--  include/linux/ww_mutex.h                       21
43 files changed, 1259 insertions, 401 deletions
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 2f9d36b72bd8..cdc25f8979f7 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2121,7 +2121,7 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -2155,7 +2155,7 @@ raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -2189,7 +2189,7 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -2222,7 +2222,7 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -4247,7 +4247,7 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -4281,7 +4281,7 @@ raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -4315,7 +4315,7 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -4348,7 +4348,7 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// b565db590afeeff0d7c9485ccbca5bb6e155749f
+// 206314f82b8b73a5c3aa69cf7f35ac9e7b5d6b58
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 37ab6314a9f7..feb3b5dc3e96 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1269,7 +1269,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
@@ -1292,7 +1292,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
@@ -1314,7 +1314,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
@@ -1337,7 +1337,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
@@ -2847,7 +2847,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
@@ -2870,7 +2870,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
@@ -2892,7 +2892,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
@@ -2915,7 +2915,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
@@ -4425,7 +4425,7 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -4448,7 +4448,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -4470,7 +4470,7 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -4493,7 +4493,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// f618ac667f868941a84ce0ab2242f1786e049ed4
+// 9dd948d3012b22c4e75933a5172983f912e46439
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index f86b29d90877..6a4e47d2db35 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1449,7 +1449,7 @@ raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
@@ -1473,7 +1473,7 @@ raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
@@ -1497,7 +1497,7 @@ raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
@@ -1521,7 +1521,7 @@ raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
- * Return: @true if the exchange occured, @false otherwise.
+ * Return: @true if the exchange occurred, @false otherwise.
*/
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// eadf183c3600b8b92b91839dd3be6bcc560c752d
+// 4b882bf19018602c10816c52f8b4ae280adc887b
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index c0989b5b0407..7869a6e59b6a 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -7,6 +7,18 @@
#include <linux/atomic.h>
#include <linux/bug.h>
+#include <asm/processor.h> /* for cpu_relax() */
+
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr) (struct __context_bitlock *)(bitnum + (addr))
+
/*
* bit-based spin_lock()
*
@@ -14,6 +26,7 @@
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
/*
* Assuming the lock is uncontended, this never enters
@@ -32,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -47,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
return 0;
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
@@ -55,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -63,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
@@ -72,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -80,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
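The effect of the new __bitlock() token (a hedged sketch; frob_bucket and the bit number are illustrative) is that each (bit, address) pair becomes a distinct context lock instance, so the analysis can pair bit_spin_lock() with the matching bit_spin_unlock():

#include <linux/bit_spinlock.h>

static void frob_bucket(unsigned long *bucket_word)
{
	bit_spin_lock(1, bucket_word);		/* acquires __bitlock(1, bucket_word) */
	/* ... update data protected by bit 1 of *bucket_word ... */
	bit_spin_unlock(1, bucket_word);	/* releases the same token */
}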
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 8d41b917c77d..dbc4162921e9 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -278,16 +278,21 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
+typedef _type lock_##_name##_t; \
static __always_inline void class_##_name##_destructor(_type *p) \
+ __no_context_analysis \
{ _type _T = *p; _exit; } \
static __always_inline _type class_##_name##_constructor(_init_args) \
+ __no_context_analysis \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef lock_##_name##_t lock_##_name##ext##_t; \
typedef class_##_name##_t class_##_name##ext##_t; \
static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ __no_context_analysis \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
@@ -474,35 +479,80 @@ _label: \
*/
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef _type lock_##_name##_t; \
typedef struct { \
_type *lock; \
__VA_ARGS__; \
} class_##_name##_t; \
\
static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+ __no_context_analysis \
{ \
if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
-#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+#define __DEFINE_LOCK_GUARD_1(_name, _type, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
-#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+#define __DEFINE_LOCK_GUARD_0(_name, ...) \
static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+ __no_context_analysis \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
- _lock; \
+ __VA_ARGS__; \
return _t; \
}
+#define DECLARE_LOCK_GUARD_0_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(void) _lock;\
+static inline void class_##_name##_destructor(class_##_name##_t *_T) _unlock;
+
+/*
+ * To support Context Analysis, we need to allow the compiler to see the
+ * acquisition and release of the context lock. However, the "cleanup" helpers
+ * wrap the lock in a struct passed through separate helper functions, which
+ * hides the lock alias from the compiler (no inter-procedural analysis).
+ *
+ * To make it work, we introduce an explicit alias to the context lock instance
+ * that is "cleaned" up with a separate cleanup helper. This helper is a dummy
+ * function that does nothing at runtime, but has the "_unlock" attribute to
+ * tell the compiler what happens at the end of the scope.
+ *
+ * To generalize the pattern, the WITH_LOCK_GUARD_1_ATTRS() macro should be used
+ * to redefine the constructor, which then also creates the alias variable with
+ * the right "cleanup" attribute, *after* DECLARE_LOCK_GUARD_1_ATTRS() has been
+ * used.
+ *
+ * Example usage:
+ *
+ * DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+ * #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+ *
+ * Note: To support the for-loop based scoped helpers, the auxiliary variable
+ * must be a pointer to the "class" type because it is defined in the same
+ * statement as the guard variable. However, we initialize it with the lock
+ * pointer (despite the type mismatch, the compiler's alias analysis still works
+ * as expected). The "_unlock" attribute receives a pointer to the auxiliary
+ * variable (a double pointer to the class type), and must be cast and
+ * dereferenced appropriately.
+ */
+#define DECLARE_LOCK_GUARD_1_ATTRS(_name, _lock, _unlock) \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T) _lock;\
+static __always_inline void __class_##_name##_cleanup_ctx(class_##_name##_t **_T) \
+ __no_context_analysis _unlock { }
+#define WITH_LOCK_GUARD_1_ATTRS(_name, _T) \
+ class_##_name##_constructor(_T), \
+ *__UNIQUE_ID(unlock) __cleanup(__class_##_name##_cleanup_ctx) = (void *)(unsigned long)(_T)
+
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
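Putting the DECLARE_LOCK_GUARD_1_ATTRS()/WITH_LOCK_GUARD_1_ATTRS() pattern together (a hedged sketch, assuming the mutex guard has been wired up as in the comment above; struct foo is illustrative), a scoped guard now participates in the analysis just like open-coded lock/unlock calls:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct foo {
	struct mutex lock;
	int value __guarded_by(&lock);
};

static void foo_set(struct foo *f, int v)
{
	guard(mutex)(&f->lock);	/* constructor __acquires, scope-end cleanup __releases */
	f->value = v;		/* ok: &f->lock is held for the rest of the scope */
}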
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
new file mode 100644
index 000000000000..00c074a2ccb0
--- /dev/null
+++ b/include/linux/compiler-context-analysis.h
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Macros and attributes for compiler-based static context analysis.
+ */
+
+#ifndef _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+#define _LINUX_COMPILER_CONTEXT_ANALYSIS_H
+
+#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__) && !defined(__GENKSYMS__)
+
+/*
+ * These attributes define new context lock (Clang: capability) types.
+ * Internal only.
+ */
+# define __ctx_lock_type(name) __attribute__((capability(#name)))
+# define __reentrant_ctx_lock __attribute__((reentrant_capability))
+# define __acquires_ctx_lock(...) __attribute__((acquire_capability(__VA_ARGS__)))
+# define __acquires_shared_ctx_lock(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
+# define __try_acquires_ctx_lock(ret, var) __attribute__((try_acquire_capability(ret, var)))
+# define __try_acquires_shared_ctx_lock(ret, var) __attribute__((try_acquire_shared_capability(ret, var)))
+# define __releases_ctx_lock(...) __attribute__((release_capability(__VA_ARGS__)))
+# define __releases_shared_ctx_lock(...) __attribute__((release_shared_capability(__VA_ARGS__)))
+# define __returns_ctx_lock(var) __attribute__((lock_returned(var)))
+
+/*
+ * The below are used to annotate code being checked. Internal only.
+ */
+# define __excludes_ctx_lock(...) __attribute__((locks_excluded(__VA_ARGS__)))
+# define __requires_ctx_lock(...) __attribute__((requires_capability(__VA_ARGS__)))
+# define __requires_shared_ctx_lock(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
+
+/*
+ * The "assert_capability" attribute is a bit confusingly named. It does not
+ * generate a check. Instead, it tells the analysis to *assume* the capability
+ * is held. This is used for augmenting runtime assertions, that can then help
+ * with patterns beyond the compiler's static reasoning abilities.
+ */
+# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
+# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
+
+/**
+ * __guarded_by - struct member and globals attribute, declares variable
+ * only accessible within active context
+ *
+ * Declares that the struct member or global variable is only accessible within
+ * the context entered by the given context lock. Read operations on the data
+ * require shared access, while write operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long counter __guarded_by(&lock);
+ * };
+ */
+# define __guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
+
+/**
+ * __pt_guarded_by - struct member and globals attribute, declares pointed-to
+ * data only accessible within active context
+ *
+ * Declares that the data pointed to by the struct member pointer or global
+ * pointer is only accessible within the context entered by the given context
+ * lock. Read operations on the data require shared access, while write
+ * operations require exclusive access.
+ *
+ * .. code-block:: c
+ *
+ * struct some_state {
+ * spinlock_t lock;
+ * long *counter __pt_guarded_by(&lock);
+ * };
+ */
+# define __pt_guarded_by(...) __attribute__((pt_guarded_by(__VA_ARGS__)))
+
+/**
+ * context_lock_struct() - declare or define a context lock struct
+ * @name: struct name
+ *
+ * Helper to declare or define a struct type that is also a context lock.
+ *
+ * .. code-block:: c
+ *
+ * context_lock_struct(my_handle) {
+ * int foo;
+ * long bar;
+ * };
+ *
+ * struct some_state {
+ * ...
+ * };
+ * // ... declared elsewhere ...
+ * context_lock_struct(some_state);
+ *
+ * Note: The implementation defines several helper functions that can acquire
+ * and release the context lock.
+ */
+# define context_lock_struct(name, ...) \
+ struct __ctx_lock_type(name) __VA_ARGS__ name; \
+ static __always_inline void __acquire_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_ctx_lock(var) { } \
+ static __always_inline void __acquire_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __acquires_shared_ctx_lock(var) { } \
+ static __always_inline bool __try_acquire_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline bool __try_acquire_shared_ctx_lock(const struct name *var, bool ret) \
+ __attribute__((overloadable)) __no_context_analysis __try_acquires_shared_ctx_lock(1, var) \
+ { return ret; } \
+ static __always_inline void __release_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_ctx_lock(var) { } \
+ static __always_inline void __release_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __no_context_analysis __releases_shared_ctx_lock(var) { } \
+ static __always_inline void __assume_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_ctx_lock(var) { } \
+ static __always_inline void __assume_shared_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __assumes_shared_ctx_lock(var) { } \
+ struct name
+
+/**
+ * disable_context_analysis() - disables context analysis
+ *
+ * Disables context analysis. Must be paired with a later
+ * enable_context_analysis().
+ */
+# define disable_context_analysis() \
+ __diag_push(); \
+ __diag_ignore_all("-Wunknown-warning-option", "") \
+ __diag_ignore_all("-Wthread-safety", "") \
+ __diag_ignore_all("-Wthread-safety-pointer", "")
+
+/**
+ * enable_context_analysis() - re-enables context analysis
+ *
+ * Re-enables context analysis. Must be paired with a prior
+ * disable_context_analysis().
+ */
+# define enable_context_analysis() __diag_pop()
+
+/**
+ * __no_context_analysis - function attribute, disables context analysis
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Prefer use of `context_unsafe()` where possible.
+ */
+# define __no_context_analysis __attribute__((no_thread_safety_analysis))
+
+#else /* !WARN_CONTEXT_ANALYSIS */
+
+# define __ctx_lock_type(name)
+# define __reentrant_ctx_lock
+# define __acquires_ctx_lock(...)
+# define __acquires_shared_ctx_lock(...)
+# define __try_acquires_ctx_lock(ret, var)
+# define __try_acquires_shared_ctx_lock(ret, var)
+# define __releases_ctx_lock(...)
+# define __releases_shared_ctx_lock(...)
+# define __assumes_ctx_lock(...)
+# define __assumes_shared_ctx_lock(...)
+# define __returns_ctx_lock(var)
+# define __guarded_by(...)
+# define __pt_guarded_by(...)
+# define __excludes_ctx_lock(...)
+# define __requires_ctx_lock(...)
+# define __requires_shared_ctx_lock(...)
+# define __acquire_ctx_lock(var) do { } while (0)
+# define __acquire_shared_ctx_lock(var) do { } while (0)
+# define __try_acquire_ctx_lock(var, ret) (ret)
+# define __try_acquire_shared_ctx_lock(var, ret) (ret)
+# define __release_ctx_lock(var) do { } while (0)
+# define __release_shared_ctx_lock(var) do { } while (0)
+# define __assume_ctx_lock(var) do { (void)(var); } while (0)
+# define __assume_shared_ctx_lock(var) do { (void)(var); } while (0)
+# define context_lock_struct(name, ...) struct __VA_ARGS__ name
+# define disable_context_analysis()
+# define enable_context_analysis()
+# define __no_context_analysis
+
+#endif /* WARN_CONTEXT_ANALYSIS */
+
+/**
+ * context_unsafe() - disable context checking for contained code
+ *
+ * Disables context checking for contained statements or expression.
+ *
+ * .. code-block:: c
+ *
+ * struct some_data {
+ * spinlock_t lock;
+ * int counter __guarded_by(&lock);
+ * };
+ *
+ * int foo(struct some_data *d)
+ * {
+ * // ...
+ * // other code that is still checked ...
+ * // ...
+ * return context_unsafe(d->counter);
+ * }
+ */
+#define context_unsafe(...) \
+({ \
+ disable_context_analysis(); \
+ __VA_ARGS__; \
+ enable_context_analysis() \
+})
+
+/**
+ * __context_unsafe() - function attribute, disable context checking
+ * @comment: comment explaining why opt-out is safe
+ *
+ * Function attribute denoting that context analysis is disabled for the
+ * whole function. Forces adding an inline comment as argument.
+ */
+#define __context_unsafe(comment) __no_context_analysis
+
+/**
+ * context_unsafe_alias() - helper to insert a context lock "alias barrier"
+ * @p: pointer aliasing a context lock or object containing context locks
+ *
+ * No-op function that acts as a "context lock alias barrier", where the
+ * analysis rightfully detects that we're switching aliases, but the switch is
+ * considered safe but beyond the analysis reasoning abilities.
+ *
+ * This should be inserted before the first use of such an alias.
+ *
+ * Implementation Note: The compiler ignores aliases that may be reassigned but
+ * their value cannot be determined (e.g. when passing a non-const pointer to an
+ * alias as a function argument).
+ */
+#define context_unsafe_alias(p) _context_unsafe_alias((void **)&(p))
+static inline void _context_unsafe_alias(void **p) { }
+
+/**
+ * token_context_lock() - declare an abstract global context lock instance
+ * @name: token context lock name
+ *
+ * Helper that declares an abstract global context lock instance @name, but not
+ * backed by a real data structure (linker error if accidentally referenced).
+ * The type name is `__ctx_lock_@name`.
+ */
+#define token_context_lock(name, ...) \
+ context_lock_struct(__ctx_lock_##name, ##__VA_ARGS__) {}; \
+ extern const struct __ctx_lock_##name *name
+
+/**
+ * token_context_lock_instance() - declare another instance of a global context lock
+ * @ctx: token context lock previously declared with token_context_lock()
+ * @name: name of additional global context lock instance
+ *
+ * Helper that declares an additional instance @name of the same token context
+ * lock class @ctx. This is helpful where multiple related token contexts are
+ * declared, to allow using the same underlying type (`__ctx_lock_@ctx`) as
+ * function arguments.
+ */
+#define token_context_lock_instance(ctx, name) \
+ extern const struct __ctx_lock_##ctx *name
+
+/*
+ * Common keywords for static context analysis.
+ */
+
+/**
+ * __must_hold() - function attribute, caller must hold exclusive context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) exclusively.
+ */
+#define __must_hold(...) __requires_ctx_lock(__VA_ARGS__)
+
+/**
+ * __must_not_hold() - function attribute, caller must not hold context lock
+ *
+ * Function attribute declaring that the caller must not hold the given context
+ * lock instance(s).
+ */
+#define __must_not_hold(...) __excludes_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires() - function attribute, function acquires context lock exclusively
+ *
+ * Function attribute declaring that the function acquires the given context
+ * lock instance(s) exclusively, but does not release them.
+ */
+#define __acquires(...) __acquires_ctx_lock(__VA_ARGS__)
+
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_true(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_false(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonzero(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_0(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonnull(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_NULL(x, ...) __try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+
+/**
+ * __cond_acquires() - function attribute, function conditionally
+ * acquires a context lock exclusively
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x exclusively, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires(ret, x) __cond_acquires_impl_##ret(x)
+
+/**
+ * __releases() - function attribute, function releases a context lock exclusively
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) exclusively. The associated context(s) must be active on
+ * entry.
+ */
+#define __releases(...) __releases_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire() - function to acquire context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x exclusively.
+ */
+#define __acquire(x) __acquire_ctx_lock(x)
+
+/**
+ * __release() - function to release context lock exclusively
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x.
+ */
+#define __release(x) __release_ctx_lock(x)
+
+/**
+ * __must_hold_shared() - function attribute, caller must hold shared context lock
+ *
+ * Function attribute declaring that the caller must hold the given context
+ * lock instance(s) with shared access.
+ */
+#define __must_hold_shared(...) __requires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquires_shared() - function attribute, function acquires context lock shared
+ *
+ * Function attribute declaring that the function acquires the given
+ * context lock instance(s) with shared access, but does not release them.
+ */
+#define __acquires_shared(...) __acquires_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __cond_acquires_shared() - function attribute, function conditionally
+ * acquires a context lock shared
+ * @ret: abstract value returned by function if context lock acquired
+ * @x: context lock instance pointer
+ *
+ * Function attribute declaring that the function conditionally acquires the
+ * given context lock instance @x with shared access, but does not release it.
+ * The function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
+ */
+#define __cond_acquires_shared(ret, x) __cond_acquires_impl_##ret(x, _shared)
+
+/**
+ * __releases_shared() - function attribute, function releases a
+ * context lock shared
+ *
+ * Function attribute declaring that the function releases the given context
+ * lock instance(s) with shared access. The associated context(s) must be
+ * active on entry.
+ */
+#define __releases_shared(...) __releases_shared_ctx_lock(__VA_ARGS__)
+
+/**
+ * __acquire_shared() - function to acquire context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that acquires the given context lock instance @x with shared
+ * access.
+ */
+#define __acquire_shared(x) __acquire_shared_ctx_lock(x)
+
+/**
+ * __release_shared() - function to release context lock shared
+ * @x: context lock instance pointer
+ *
+ * No-op function that releases the given context lock instance @x with shared
+ * access.
+ */
+#define __release_shared(x) __release_shared_ctx_lock(x)
+
+/**
+ * __acquire_ret() - helper to acquire context lock of return value
+ * @call: call expression
+ * @ret_expr: acquire expression that uses __ret
+ */
+#define __acquire_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire(ret_expr); \
+ __ret; \
+ })
+
+/**
+ * __acquire_shared_ret() - helper to acquire context lock shared of return value
+ * @call: call expression
+ * @ret_expr: acquire shared expression that uses __ret
+ */
+#define __acquire_shared_ret(call, ret_expr) \
+ ({ \
+ __auto_type __ret = call; \
+ __acquire_shared(ret_expr); \
+ __ret; \
+ })
+
+/*
+ * Attributes to mark functions returning acquired context locks.
+ *
+ * This is purely cosmetic to help readability, and should be used with the
+ * above macros as follows:
+ *
+ * struct foo { spinlock_t lock; ... };
+ * ...
+ * #define myfunc(...) __acquire_ret(_myfunc(__VA_ARGS__), &__ret->lock)
+ * struct foo *_myfunc(int bar) __acquires_ret;
+ * ...
+ */
+#define __acquires_ret __no_context_analysis
+#define __acquires_shared_ret __no_context_analysis
+
+#endif /* _LINUX_COMPILER_CONTEXT_ANALYSIS_H */
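A hedged sketch of the token_context_lock() idea (names are illustrative): an abstract context with no backing object, which callees can nevertheless require via __must_hold() even though there is no lock word to point at:

token_context_lock(my_subsys_ctx);

static void my_subsys_enter(void) __acquires(my_subsys_ctx)
{
	__acquire(my_subsys_ctx);	/* no-op, purely informs the analysis */
}

static void my_subsys_exit(void) __releases(my_subsys_ctx)
{
	__release(my_subsys_ctx);
}

static void only_in_my_subsys_ctx(void) __must_hold(my_subsys_ctx)
{
	/* callers must sit between my_subsys_enter() and my_subsys_exit() */
}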
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04487c9bd751..110b28dfd1d1 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define data_race(expr) \
({ \
__kcsan_disable_current(); \
+ disable_context_analysis(); \
auto __v = (expr); \
+ enable_context_analysis(); \
__kcsan_enable_current(); \
__v; \
})
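Because data_race() now also suppresses context analysis for its argument (mirroring its KCSAN semantics), an intentionally lockless snapshot of a __guarded_by() member does not warn; a minimal sketch reusing the illustrative counter_state from earlier:

static long counter_snapshot(struct counter_state *s)
{
	return data_race(s->counter);	/* deliberate lockless read: no analysis warning */
}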
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index a93c166276cd..ea96a4466d82 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -41,6 +41,8 @@
# define BTF_TYPE_TAG(value) /* nothing */
#endif
+#include <linux/compiler-context-analysis.h>
+
/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
@@ -51,14 +53,6 @@
# define __rcu __attribute__((noderef, address_space(__rcu)))
static inline void __chk_user_ptr(const volatile void __user *ptr) { }
static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
-/* context/locking */
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __cond_acquires(x) __attribute__((context(x,0,-1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
/* other */
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
@@ -79,14 +73,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
-/* context/locking */
-# define __must_hold(x)
-# define __acquires(x)
-# define __cond_acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
/* other */
# define __force
# define __nocast
diff --git a/include/linux/console.h b/include/linux/console.h
index fc9f5c5c1b04..f882833bedf0 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -492,8 +492,8 @@ static inline bool console_srcu_read_lock_is_held(void)
extern int console_srcu_read_lock(void);
extern void console_srcu_read_unlock(int cookie);
-extern void console_list_lock(void) __acquires(console_mutex);
-extern void console_list_unlock(void) __releases(console_mutex);
+extern void console_list_lock(void);
+extern void console_list_unlock(void);
extern struct hlist_head console_list;
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7cecda29447e..4177c4738282 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -239,18 +239,16 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
* @cancel: callback to call
* @cancel_data: extra data for the callback to call
*/
-struct debugfs_cancellation {
+context_lock_struct(debugfs_cancellation) {
struct list_head list;
void (*cancel)(struct dentry *, void *);
void *cancel_data;
};
-void __acquires(cancellation)
-debugfs_enter_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
-void __releases(cancellation)
-debugfs_leave_cancellation(struct file *file,
- struct debugfs_cancellation *cancellation);
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __acquires(cancellation);
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation) __releases(cancellation);
#else
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 88e82ab1367c..9bc6abe57572 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -81,6 +81,7 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *mutex)
+ __cond_acquires(true, mutex)
{
if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
@@ -102,6 +103,7 @@ static inline int kref_put_mutex(struct kref *kref,
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
+ __cond_acquires(true, lock)
{
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..df9eebe6afca 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -144,11 +144,13 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ __acquires(__bitlock(0, b))
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ __releases(__bitlock(0, b))
{
__bit_spin_unlock(0, (unsigned long *)b);
}
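With the hunk above, hlist_bl_lock()/hlist_bl_unlock() form a recognizable acquire/release pair on __bitlock(0, b); a hedged sketch (bl_insert is illustrative):

static void bl_insert(struct hlist_bl_head *head, struct hlist_bl_node *n)
{
	hlist_bl_lock(head);		/* acquires __bitlock(0, head) */
	hlist_bl_add_head(n, head);
	hlist_bl_unlock(head);		/* releases __bitlock(0, head) */
}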
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
+#define local_lock(lock) __local_lock(__this_cpu_local_lock(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock) __local_lock_irq(__this_cpu_local_lock(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(this_cpu_ptr(lock), flags)
+ __local_lock_irqsave(__this_cpu_local_lock(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock) __local_unlock(__this_cpu_local_lock(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock) __local_unlock_irq(__this_cpu_local_lock(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+ __local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
/**
* local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
* locking constrains it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock) __local_trylock(__this_cpu_local_lock(lock))
#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
@@ -81,27 +81,44 @@
* HARDIRQ context on PREEMPT_RT.
*/
#define local_trylock_irqsave(lock, flags) \
- __local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
- local_lock(_T),
- local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
- local_lock_irq(_T),
- local_unlock_irq(_T))
+ __local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+ local_lock(_T->lock),
+ local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+ local_lock_irq(_T->lock),
+ local_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
local_lock_irqsave(_T->lock, _T->flags),
local_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
#define local_lock_nested_bh(_lock) \
- __local_lock_nested_bh(this_cpu_ptr(_lock))
+ __local_lock_nested_bh(__this_cpu_local_lock(_lock))
#define local_unlock_nested_bh(_lock) \
- __local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
- local_lock_nested_bh(_T),
- local_unlock_nested_bh(_T))
+ __local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+ local_lock_nested_bh(_T->lock),
+ local_unlock_nested_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
#endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8f82b4eb542f..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -4,25 +4,30 @@
#endif
#include <linux/percpu-defs.h>
+#include <linux/irqflags.h>
#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
+#include <asm/current.h>
#ifndef CONFIG_PREEMPT_RT
-typedef struct {
+context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
struct task_struct *owner;
#endif
u8 acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
@@ -84,7 +89,10 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock) \
+do { \
+ __local_lock_init((local_lock_t *)lock); \
+} while (0)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -117,22 +125,25 @@ do { \
do { \
preempt_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
__local_lock_acquire(lock); \
+ __acquire(lock); \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
preempt_disable(); \
@@ -146,10 +157,10 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
local_trylock_t *__tl; \
\
local_irq_save(flags); \
@@ -163,7 +174,7 @@ do { \
(local_lock_t *)__tl); \
} \
!!__tl; \
- })
+ }))
/* preemption or migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -186,18 +197,21 @@ do { \
#define __local_unlock(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
+ __release(lock); \
__local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
@@ -206,13 +220,20 @@ do { \
do { \
lockdep_assert_in_softirq(); \
local_lock_acquire((lock)); \
+ __acquire(lock); \
} while (0)
#define __local_unlock_nested_bh(lock) \
- local_lock_release((lock))
+ do { \
+ __release(lock); \
+ local_lock_release((lock)); \
+ } while (0)
#else /* !CONFIG_PREEMPT_RT */
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
/*
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
* critical section while staying preemptible.
@@ -267,7 +288,7 @@ do { \
} while (0)
#define __local_trylock(lock) \
- ({ \
+ __try_acquire_ctx_lock(lock, context_unsafe(({ \
int __locked; \
\
if (in_nmi() | in_hardirq()) { \
@@ -279,17 +300,40 @@ do { \
migrate_enable(); \
} \
__locked; \
- })
+ })))
#define __local_trylock_irqsave(lock, flags) \
- ({ \
+ __try_acquire_ctx_lock(lock, ({ \
typecheck(unsigned long, flags); \
flags = 0; \
__local_trylock(lock); \
- })
+ }))
/* migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(__lock) \
(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
#endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base variable,
+ * and hide the fact we actually pass the per-CPU instance to lock/unlock
+ * functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+ __returns_ctx_lock(base) __attribute__((overloadable))
+{
+ return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base) this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
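The __this_cpu_local_lock() helper lets the analysis track a per-CPU lock by its base variable; a hedged sketch of the resulting usage (pcpu_buf and its field are illustrative):

#include <linux/local_lock.h>
#include <linux/percpu-defs.h>

struct pcpu_buf {
	local_lock_t lock;
	int count;
};
static DEFINE_PER_CPU(struct pcpu_buf, pcpu_buf) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void pcpu_buf_add(void)
{
	local_lock(&pcpu_buf.lock);	/* analysis sees &pcpu_buf.lock acquired */
	this_cpu_inc(pcpu_buf.count);
	local_unlock(&pcpu_buf.lock);
}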
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index dd634103b014..621566345406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -282,16 +282,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
#define lockdep_assert_held(l) \
- lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+ do { lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_not_held(l) \
lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
#define lockdep_assert_held_write(l) \
- lockdep_assert(lockdep_is_held_type(l, 0))
+ do { lockdep_assert(lockdep_is_held_type(l, 0)); __assume_ctx_lock(l); } while (0)
#define lockdep_assert_held_read(l) \
- lockdep_assert(lockdep_is_held_type(l, 1))
+ do { lockdep_assert(lockdep_is_held_type(l, 1)); __assume_shared_ctx_lock(l); } while (0)
#define lockdep_assert_held_once(l) \
lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
@@ -389,10 +389,10 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert(c) do { } while (0)
#define lockdep_assert_once(c) do { } while (0)
-#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held(l) __assume_ctx_lock(l)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) __assume_ctx_lock(l)
+#define lockdep_assert_held_read(l) __assume_shared_ctx_lock(l)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
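Since lockdep_assert_held() now also expands to __assume_ctx_lock(), a runtime assertion doubles as a hint to the static analysis on paths where the lock is taken by the caller; a hedged sketch (struct stats is illustrative):

struct stats {
	spinlock_t lock;
	u64 events __guarded_by(&lock);
};

/* Caller holds st->lock; the assertion satisfies both lockdep and the analysis. */
static void stats_bump(struct stats *st)
{
	lockdep_assert_held(&st->lock);	/* runtime check + __assume_ctx_lock(&st->lock) */
	st->events++;			/* no warning: context assumed active */
}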
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 815d871fadfc..6ded24cdb4a8 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -49,9 +49,7 @@ static inline void lockref_init(struct lockref *lockref)
void lockref_get(struct lockref *lockref);
int lockref_put_return(struct lockref *lockref);
bool lockref_get_not_zero(struct lockref *lockref);
-bool lockref_put_or_lock(struct lockref *lockref);
-#define lockref_put_or_lock(_lockref) \
- (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+bool lockref_put_or_lock(struct lockref *lockref) __cond_acquires(false, &lockref->lock);
void lockref_mark_dead(struct lockref *lockref);
bool lockref_get_not_dead(struct lockref *lockref);
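
[Note: the new annotation reads as "the lock is acquired exactly when the function returns false", which matches the usual caller pattern (teardown body is illustrative):

	static void obj_put(struct lockref *ref)
	{
		if (lockref_put_or_lock(ref))
			return;			/* count decremented, lock not taken */

		/* count was about to reach zero: ref->lock is now held */
		/* ... final teardown ... */
		spin_unlock(&ref->lock);
	}
]
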
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0d5be9dc736..73d3500d388e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2979,15 +2979,8 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl);
-static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
- spinlock_t **ptl)
-{
- pte_t *ptep;
- __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
- return ptep;
-}
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -3341,31 +3334,15 @@ static inline bool pagetable_pte_ctor(struct mm_struct *mm,
return true;
}
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
-static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
- pmd_t *pmdvalp)
-{
- pte_t *pte;
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
- __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
- return pte;
-}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
}
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp);
-static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp)
-{
- pte_t *pte;
-
- __cond_lock(RCU, __cond_lock(*ptlp,
- pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
- return pte;
-}
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
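
[Note: the Sparse-only wrapper layer is gone and the conditional RCU/ptl acquisition now lives on the declarations themselves; the caller pattern is unchanged. Sketch, with a hypothetical helper name:

	static int touch_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return -EAGAIN;		/* pmd changed under us; caller retries */
		/* ... inspect or modify *pte while ptl is held ... */
		pte_unmap_unlock(pte, ptl);
		return 0;
	}
]
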
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bf535f0118bb..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -182,13 +182,13 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
- unsigned int subclass, struct lockdep_map *nest_lock);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -211,10 +211,10 @@ do { \
_mutex_lock_killable(lock, subclass, NULL)
#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock) __acquires(lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock);
+extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -232,7 +232,7 @@ extern void mutex_lock_io(struct mutex *lock);
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) \
( \
@@ -242,17 +242,27 @@ extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest
#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
#else
-extern int mutex_trylock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
-extern void mutex_unlock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock) __releases(lock);
-extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock);
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
+
+DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
extern unsigned long mutex_get_owner(struct mutex *lock);
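
[Note: switching from DEFINE_GUARD() to DEFINE_LOCK_GUARD_1() is what allows the acquire/release attributes above to be attached to the guard constructors; guard usage itself does not change. Sketch with an illustrative structure:

	struct cfg {
		struct mutex	lock;
		int		value;
	};

	static void cfg_set(struct cfg *c, int v)
	{
		guard(mutex)(&c->lock);		/* released automatically at end of scope */
		c->value = v;
	}

	static void cfg_bump(struct cfg *c)
	{
		scoped_guard(mutex, &c->lock)
			c->value++;
	}
]
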
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..80975935ec48 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -38,7 +38,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+context_lock_struct(mutex) {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +59,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>
-struct mutex {
+context_lock_struct(mutex) {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index bd5a420cf09a..7729fef249e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -31,6 +31,16 @@
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>
+token_context_lock(RCU, __reentrant_ctx_lock);
+token_context_lock_instance(RCU, RCU_SCHED);
+token_context_lock_instance(RCU, RCU_BH);
+
+/*
+ * A convenience macro that can be used for RCU-protected globals or struct
+ * members; adds type qualifier __rcu, and also enforces __guarded_by(RCU).
+ */
+#define __rcu_guarded __rcu __guarded_by(RCU)
+
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
@@ -396,7 +406,8 @@ static inline void rcu_preempt_sleep_check(void) { }
// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
-static inline bool lockdep_assert_rcu_helper(bool c)
+static __always_inline bool lockdep_assert_rcu_helper(bool c, const struct __ctx_lock_RCU *ctx)
+ __assumes_shared_ctx_lock(RCU) __assumes_shared_ctx_lock(ctx)
{
return debug_lockdep_rcu_enabled() &&
(c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
@@ -409,7 +420,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
*/
#define lockdep_assert_in_rcu_read_lock() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map), RCU))
/**
* lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
@@ -419,7 +430,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* actual rcu_read_lock_bh() is required.
*/
#define lockdep_assert_in_rcu_read_lock_bh() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map), RCU_BH))
/**
* lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
@@ -429,7 +440,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* instead an actual rcu_read_lock_sched() is required.
*/
#define lockdep_assert_in_rcu_read_lock_sched() \
- WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map), RCU_SCHED))
/**
* lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
@@ -447,17 +458,17 @@ static inline bool lockdep_assert_rcu_helper(bool c)
WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
!lock_is_held(&rcu_bh_lock_map) && \
!lock_is_held(&rcu_sched_lock_map) && \
- preemptible()))
+ preemptible(), RCU))
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
-#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
-#define lockdep_assert_in_rcu_reader() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() __assume_shared_ctx_lock(RCU)
+#define lockdep_assert_in_rcu_read_lock_bh() __assume_shared_ctx_lock(RCU_BH)
+#define lockdep_assert_in_rcu_read_lock_sched() __assume_shared_ctx_lock(RCU_SCHED)
+#define lockdep_assert_in_rcu_reader() __assume_shared_ctx_lock(RCU)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -477,11 +488,11 @@ static inline bool lockdep_assert_rcu_helper(bool c)
#endif /* #else #ifdef __CHECKER__ */
#define __unrcu_pointer(p, local) \
-({ \
+context_unsafe( \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
- ((typeof(*p) __force __kernel *)(local)); \
-})
+ ((typeof(*p) __force __kernel *)(local)) \
+)
/**
* unrcu_pointer - mark a pointer as not being RCU protected
* @p: pointer needing to lose its __rcu property
@@ -557,7 +568,7 @@ static inline bool lockdep_assert_rcu_helper(bool c)
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
-do { \
+context_unsafe( \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
rcu_check_sparse(p, __rcu); \
\
@@ -565,7 +576,7 @@ do { \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-} while (0)
+)
/**
* rcu_replace_pointer() - replace an RCU pointer, returning its old value
@@ -832,9 +843,10 @@ do { \
* only when acquiring spinlocks that are subject to priority inheritance.
*/
static __always_inline void rcu_read_lock(void)
+ __acquires_shared(RCU)
{
__rcu_read_lock();
- __acquire(RCU);
+ __acquire_shared(RCU);
rcu_lock_acquire(&rcu_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
@@ -862,11 +874,12 @@ static __always_inline void rcu_read_lock(void)
* See rcu_read_lock() for more information.
*/
static inline void rcu_read_unlock(void)
+ __releases_shared(RCU)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
- __release(RCU);
+ __release_shared(RCU);
__rcu_read_unlock();
}
@@ -885,9 +898,11 @@ static inline void rcu_read_unlock(void)
* was invoked from some other task.
*/
static inline void rcu_read_lock_bh(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_BH)
{
local_bh_disable();
- __acquire(RCU_BH);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
@@ -899,11 +914,13 @@ static inline void rcu_read_lock_bh(void)
* See rcu_read_lock_bh() for more information.
*/
static inline void rcu_read_unlock_bh(void)
+ __releases_shared(RCU) __releases_shared(RCU_BH)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
+ __release_shared(RCU_BH);
+ __release_shared(RCU);
local_bh_enable();
}
@@ -923,9 +940,11 @@ static inline void rcu_read_unlock_bh(void)
* rcu_read_lock_sched() was invoked from an NMI handler.
*/
static inline void rcu_read_lock_sched(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
@@ -933,9 +952,11 @@ static inline void rcu_read_lock_sched(void)
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
+ __acquires_shared(RCU) __acquires_shared(RCU_SCHED)
{
preempt_disable_notrace();
- __acquire(RCU_SCHED);
+ __acquire_shared(RCU);
+ __acquire_shared(RCU_SCHED);
}
/**
@@ -944,22 +965,27 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
* See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable();
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
+ __releases_shared(RCU) __releases_shared(RCU_SCHED)
{
- __release(RCU_SCHED);
+ __release_shared(RCU_SCHED);
+ __release_shared(RCU);
preempt_enable_notrace();
}
static __always_inline void rcu_read_lock_dont_migrate(void)
+ __acquires_shared(RCU)
{
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
migrate_disable();
@@ -967,6 +993,7 @@ static __always_inline void rcu_read_lock_dont_migrate(void)
}
static inline void rcu_read_unlock_migrate(void)
+ __releases_shared(RCU)
{
rcu_read_unlock();
if (IS_ENABLED(CONFIG_PREEMPT_RCU))
@@ -1012,10 +1039,10 @@ static inline void rcu_read_unlock_migrate(void)
* ordering guarantees for either the CPU or the compiler.
*/
#define RCU_INIT_POINTER(p, v) \
- do { \
+ context_unsafe( \
rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
- } while (0)
+ )
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
@@ -1163,18 +1190,7 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
-DEFINE_LOCK_GUARD_0(rcu,
- do {
- rcu_read_lock();
- /*
- * sparse doesn't call the cleanup function,
- * so just release immediately and don't track
- * the context. We don't need to anyway, since
- * the whole point of the guard is to not need
- * the explicit unlock.
- */
- __release(RCU);
- } while (0),
- rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+DECLARE_LOCK_GUARD_0_ATTRS(rcu, __acquires_shared(RCU), __releases_shared(RCU))
#endif /* __LINUX_RCUPDATE_H */
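
[Note: with RCU modelled as a reentrant shared context lock, pointers declared __rcu_guarded are checked for being dereferenced inside an RCU read-side section (or under an explicit assumption). A minimal sketch with made-up types:

	struct item {
		int	value;
	};

	struct cache {
		struct item __rcu_guarded *cur;
	};

	static int cache_peek(struct cache *c)
	{
		struct item *it;

		guard(rcu)();			/* shared acquire of the RCU context */
		it = rcu_dereference(c->cur);
		return it ? it->value : -ENOENT;
	}
]
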
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 80dc023ac2bf..3da377ffb0c2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -478,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(true, lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(true, lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags) __cond_acquires(lock);
+ unsigned long *flags) __cond_acquires(true, lock);
#endif /* _LINUX_REFCOUNT_H */
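
[Note: __cond_acquires(true, lock) states that the lock is held only on a true return, which matches the canonical last-reference teardown pattern. Sketch, object layout and list lock are illustrative:

	static DEFINE_SPINLOCK(obj_list_lock);

	struct obj {
		refcount_t		ref;
		struct list_head	node;	/* protected by obj_list_lock */
	};

	static void obj_put(struct obj *o)
	{
		if (!refcount_dec_and_lock(&o->ref, &obj_list_lock))
			return;			/* not the last reference */

		/* last reference: obj_list_lock is held, o can no longer be found */
		list_del(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
]
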
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 08e664b21f5a..133ccb39137a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;
@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;
@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -424,13 +428,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}
@@ -612,6 +617,7 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -666,6 +672,7 @@ restart:
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -676,6 +683,7 @@ static __always_inline void *rhashtable_lookup(
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
@@ -727,6 +735,7 @@ static __always_inline void *rhashtable_lookup_fast(
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -737,6 +746,7 @@ static __always_inline struct rhlist_head *rhltable_lookup(
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
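
[Note: __must_hold_shared(RCU) turns the long-standing documentation rule that rhashtable lookups must run under rcu_read_lock() into something the compiler checks. Caller sketch; the object type and the obj_hash_params table are assumed to exist elsewhere:

	static bool obj_exists(struct rhashtable *ht, u32 key)
	{
		struct obj *obj;

		rcu_read_lock();
		obj = rhashtable_lookup(ht, &key, obj_hash_params);
		rcu_read_unlock();

		return obj != NULL;
	}
]
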
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 5b87c6f4a243..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -29,16 +29,16 @@ do { \
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock(rwlock) do {__acquire_shared(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release_shared(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
@@ -49,8 +49,8 @@ do { \
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+#define read_trylock(lock) _raw_read_trylock(lock)
+#define write_trylock(lock) _raw_write_trylock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
@@ -112,12 +112,7 @@ do { \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define write_trylock_irqsave(lock, flags) _raw_write_trylock_irqsave(lock, &(flags))
#ifdef arch_rwlock_is_contended
#define rwlock_is_contended(lock) \
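
[Note: write_trylock_irqsave() is now backed by a real function taking &flags (_raw_write_trylock_irqsave(), added in rwlock_api_smp.h below), so the conditional acquire can carry an annotation instead of relying on __cond_lock(). The caller side is unchanged; sketch with an illustrative lock name:

	unsigned long flags;

	if (write_trylock_irqsave(&foo_rwlock, flags)) {
		/* write-side critical section, interrupts disabled */
		write_unlock_irqrestore(&foo_rwlock, flags);
	}
]
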
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 31d3d1116323..61a852609eab 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -15,24 +15,24 @@
* Released under the General Public License (GPL).
*/
-void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
-void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires_shared(lock);
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
__acquires(lock);
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
__acquires(lock);
-int __lockfunc _raw_read_trylock(rwlock_t *lock);
-int __lockfunc _raw_write_trylock(rwlock_t *lock);
-void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock) __cond_acquires_shared(true, lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock) __cond_acquires(true, lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases_shared(lock);
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
void __lockfunc
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
@@ -137,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -145,6 +155,7 @@ static inline int __raw_write_trylock(rwlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline void __raw_read_lock(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -152,6 +163,7 @@ static inline void __raw_read_lock(rwlock_t *lock)
}
static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags;
@@ -163,6 +175,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_read_lock_irq(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -171,6 +184,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock)
}
static inline void __raw_read_lock_bh(rwlock_t *lock)
+ __acquires_shared(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
@@ -178,6 +192,7 @@ static inline void __raw_read_lock_bh(rwlock_t *lock)
}
static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -189,6 +204,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
}
static inline void __raw_write_lock_irq(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -197,6 +213,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock)
}
static inline void __raw_write_lock_bh(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -204,6 +221,7 @@ static inline void __raw_write_lock_bh(rwlock_t *lock)
}
static inline void __raw_write_lock(rwlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -211,6 +229,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
}
static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
@@ -220,6 +239,7 @@ static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -227,6 +247,7 @@ static inline void __raw_write_unlock(rwlock_t *lock)
}
static inline void __raw_read_unlock(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -235,6 +256,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -243,6 +265,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
}
static inline void __raw_read_unlock_irq(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -251,6 +274,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
}
static inline void __raw_read_unlock_bh(rwlock_t *lock)
+ __releases_shared(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
@@ -259,6 +283,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -267,6 +292,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
}
static inline void __raw_write_unlock_irq(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
@@ -275,6 +301,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
}
static inline void __raw_write_unlock_bh(rwlock_t *lock)
+ __releases(lock)
{
rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 7d81fc6918ee..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,26 +24,29 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
-extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock) __cond_acquires_shared(true, rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases_shared(rwlock);
extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
-extern int rt_write_trylock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock) __cond_acquires(true, rwlock);
extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
static __always_inline void read_lock_bh(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
local_bh_disable();
rt_read_lock(rwlock);
}
static __always_inline void read_lock_irq(rwlock_t *rwlock)
+ __acquires_shared(rwlock)
{
rt_read_lock(rwlock);
}
@@ -55,37 +58,43 @@ static __always_inline void read_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+#define read_trylock(lock) rt_read_trylock(lock)
static __always_inline void read_unlock(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
local_bh_enable();
}
static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases_shared(rwlock)
{
rt_read_unlock(rwlock);
}
static __always_inline void write_lock(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+ __acquires(rwlock)
{
rt_write_lock_nested(rwlock, subclass);
}
@@ -94,12 +103,14 @@ static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
#endif
static __always_inline void write_lock_bh(rwlock_t *rwlock)
+ __acquires(rwlock)
{
local_bh_disable();
rt_write_lock(rwlock);
}
static __always_inline void write_lock_irq(rwlock_t *rwlock)
+ __acquires(rwlock)
{
rt_write_lock(rwlock);
}
@@ -111,36 +122,38 @@ static __always_inline void write_lock_irq(rwlock_t *rwlock)
flags = 0; \
} while (0)
-#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+#define write_trylock(lock) rt_write_trylock(lock)
-#define write_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = write_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+ __cond_acquires(true, rwlock)
+{
+ *flags = 0;
+ return rt_write_trylock(rwlock);
+}
+#define write_trylock_irqsave(lock, flags) _write_trylock_irqsave(lock, &(flags))
static __always_inline void write_unlock(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
local_bh_enable();
}
static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
unsigned long flags)
+ __releases(rwlock)
{
rt_write_unlock(rwlock);
}
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 1948442e7750..d5e7316401e7 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -22,7 +22,7 @@
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
-typedef struct {
+context_lock_struct(rwlock) {
arch_rwlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -31,7 +31,8 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
@@ -54,13 +55,14 @@ typedef struct {
#include <linux/rwbase_rt.h>
-typedef struct {
+context_lock_struct(rwlock) {
struct rwbase_rt rwbase;
atomic_t readers;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} rwlock_t;
+};
+typedef struct rwlock rwlock_t;
#define __RWLOCK_RT_INITIALIZER(name) \
{ \
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,7 +45,7 @@
* reduce the chance that they will share the same cacheline causing
* cacheline bouncing problem.
*/
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
atomic_long_t count;
/*
* Write owner or one of the read owners as well flags regarding
@@ -76,11 +76,13 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}
static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}
@@ -148,7 +150,7 @@ extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#include <linux/rwbase_rt.h>
-struct rw_semaphore {
+context_lock_struct(rw_semaphore) {
struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -180,11 +182,13 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
}
static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rwsem_is_locked(sem));
}
static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}
@@ -202,6 +206,7 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
*/
static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held(sem);
@@ -210,6 +215,7 @@ static inline void rwsem_assert_held(const struct rw_semaphore *sem)
}
static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+ __assumes_ctx_lock(sem)
{
if (IS_ENABLED(CONFIG_LOCKDEP))
lockdep_assert_held_write(sem);
@@ -220,48 +226,66 @@ static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
/*
* lock for reading
*/
-extern void down_read(struct rw_semaphore *sem);
-extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
-extern int __must_check down_read_killable(struct rw_semaphore *sem);
+extern void down_read(struct rw_semaphore *sem) __acquires_shared(sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem) __cond_acquires_shared(0, sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-extern int down_read_trylock(struct rw_semaphore *sem);
+extern int down_read_trylock(struct rw_semaphore *sem) __cond_acquires_shared(true, sem);
/*
* lock for writing
*/
-extern void down_write(struct rw_semaphore *sem);
-extern int __must_check down_write_killable(struct rw_semaphore *sem);
+extern void down_write(struct rw_semaphore *sem) __acquires(sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem) __cond_acquires(0, sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-extern int down_write_trylock(struct rw_semaphore *sem);
+extern int down_write_trylock(struct rw_semaphore *sem) __cond_acquires(true, sem);
/*
* release a read lock
*/
-extern void up_read(struct rw_semaphore *sem);
+extern void up_read(struct rw_semaphore *sem) __releases_shared(sem);
/*
* release a write lock
*/
-extern void up_write(struct rw_semaphore *sem);
-
-DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
-DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
-
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
-DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
+extern void up_write(struct rw_semaphore *sem) __releases(sem);
+
+DEFINE_LOCK_GUARD_1(rwsem_read, struct rw_semaphore, down_read(_T->lock), up_read(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _try, down_read_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_read, _intr, down_read_interruptible(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_try, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_read_intr, __acquires_shared(_T), __releases_shared(*(struct rw_semaphore **)_T))
+#define class_rwsem_read_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_read_intr, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_write, struct rw_semaphore, down_write(_T->lock), up_write(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _try, down_write_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(rwsem_write, _kill, down_write_killable(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
/*
* downgrade write lock to read lock
*/
-extern void downgrade_write(struct rw_semaphore *sem);
+extern void downgrade_write(struct rw_semaphore *sem) __releases(sem) __acquires_shared(sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -277,11 +301,11 @@ extern void downgrade_write(struct rw_semaphore *sem);
* lockdep_set_class() at lock initialization time.
* See Documentation/locking/lockdep-design.rst for more details.)
*/
-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
-extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
-extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+extern void down_read_nested(struct rw_semaphore *sem, int subclass) __acquires_shared(sem);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires_shared(0, sem);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass) __acquires(sem);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass) __cond_acquires(0, sem);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock) __acquires(sem);
# define down_write_nest_lock(sem, nest_lock) \
do { \
@@ -295,8 +319,8 @@ do { \
* [ This API should be avoided as much as possible - the
* proper abstraction for this case is completions. ]
*/
-extern void down_read_non_owner(struct rw_semaphore *sem);
-extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void down_read_non_owner(struct rw_semaphore *sem) __acquires_shared(sem);
+extern void up_read_non_owner(struct rw_semaphore *sem) __releases_shared(sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
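
[Note: as for mutexes, moving the rwsem guards to DEFINE_LOCK_GUARD_1() lets shared versus exclusive acquisition be expressed in the constructor attributes; guard usage stays the same. Sketch with an illustrative structure:

	struct state {
		struct rw_semaphore	sem;
		int			value;
	};

	static int state_read(struct state *s)
	{
		guard(rwsem_read)(&s->sem);	/* shared; released at end of scope */
		return s->value;
	}

	static void state_write(struct state *s, int v)
	{
		guard(rwsem_write)(&s->sem);	/* exclusive */
		s->value = v;
	}
]
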
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0ef3325a39eb..8faf653803a1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2095,9 +2095,9 @@ static inline int _cond_resched(void)
_cond_resched(); \
})
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7d6449982822..a22248aebcf9 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -737,21 +737,13 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
- unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
- unsigned long *flags)
-{
- struct sighand_struct *ret;
-
- ret = __lock_task_sighand(task, flags);
- (void)__cond_lock(&task->sighand->siglock, ret);
- return ret;
-}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
+ unsigned long *flags)
+ __acquires(&task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
+ __releases(&task->sighand->siglock)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
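
[Note: lock_task_sighand() keeps its conditional behaviour, returning NULL without taking the lock when the task is already exiting; only the Sparse wrapper is gone. Typical caller fragment, inside a function that already has a task pointer:

	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(task, &flags);
	if (!sighand)
		return;			/* task is exiting; siglock not taken */
	/* ... work under task->sighand->siglock ... */
	unlock_task_sighand(task, &flags);
]
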
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 525aa2a632b2..41ed884cffc9 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -214,15 +214,19 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
+ __acquires(&p->alloc_lock)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
+ __releases(&p->alloc_lock)
{
spin_unlock(&p->alloc_lock);
}
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
#endif /* _LINUX_SCHED_TASK_H */
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 0f28b4623ad4..765bbc3d54be 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -66,6 +66,7 @@ extern void wake_up_q(struct wake_q_head *head);
/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
static inline
void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock(lock);
@@ -77,6 +78,7 @@ void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irq(lock);
@@ -89,6 +91,7 @@ void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
static inline
void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irqrestore(lock, flags);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 221123660e71..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
*/
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
return read_seqcount_begin(&sl->seqcount);
}
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ __releases_shared(sl) __no_context_analysis
{
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ __acquires(sl) __no_context_analysis
{
unsigned long flags;
@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases(sl) __no_context_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock(&sl->lock);
}
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock(&sl->lock);
}
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_bh(&sl->lock);
}
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_bh(&sl->lock);
}
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
spin_lock_irq(&sl->lock);
}
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+ __acquires_shared(sl) __no_context_analysis
{
unsigned long flags;
@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases_shared(sl) __no_context_analysis
{
spin_unlock_irqrestore(&sl->lock, flags);
}
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
+ __releases_shared(lock) __no_context_analysis
{
return !(seq & 1) && read_seqretry(lock, seq);
}
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_context_analysis
{
unsigned long flags = 0;
@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+ __no_context_analysis
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
@@ -1225,6 +1249,7 @@ struct ss_tmp {
};
static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+ __no_context_analysis
{
if (sst->lock)
spin_unlock(sst->lock);
@@ -1254,6 +1279,7 @@ extern void __scoped_seqlock_bug(void);
static __always_inline void
__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+ __no_context_analysis
{
switch (sst->state) {
case ss_done:
@@ -1296,22 +1322,31 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
}
}
+/*
+ * Context analysis no-op helper to release seqlock at the end of the for-scope;
+ * the alias analysis of the compiler will recognize that the pointer @s is an
+ * alias to @_seqlock passed to read_seqbegin(_seqlock) below.
+ */
+static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
+ __releases_shared(*((seqlock_t **)s)) __no_context_analysis {}
+
#define __scoped_seqlock_read(_seqlock, _target, _s) \
for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
- { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }, \
+ *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
+ (struct ss_tmp *)_seqlock; \
_s.state != ss_done; \
__scoped_seqlock_next(&_s, _seqlock, _target))
/**
- * scoped_seqlock_read (lock, ss_state) - execute the read side critical
- * section without manual sequence
- * counter handling or calls to other
- * helpers
- * @lock: pointer to seqlock_t protecting the data
- * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
- * the type of critical read section
+ * scoped_seqlock_read() - execute the read-side critical section
+ * without manual sequence counter handling
+ * or calls to other helpers
+ * @_seqlock: pointer to seqlock_t protecting the data
+ * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
+ * indicating the type of critical read section
*
- * Example:
+ * Example::
*
* scoped_seqlock_read (&lock, ss_lock) {
* // read-side critical section
@@ -1323,4 +1358,8 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
#define scoped_seqlock_read(_seqlock, _target) \
__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
#endif /* __LINUX_SEQLOCK_H */
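
[Note: the read_seqbegin()/read_seqretry() annotations above describe the classic lockless retry loop, which scoped_seqlock_read() now wraps for the analysis via __scoped_seqlock_cleanup_ctx(). The open-coded form, for reference, with illustrative data:

	static DEFINE_SEQLOCK(ts_lock);
	static struct timespec64 ts_data;

	static struct timespec64 get_ts(void)
	{
		struct timespec64 copy;
		unsigned int seq;

		do {
			seq = read_seqbegin(&ts_lock);
			copy = ts_data;
		} while (read_seqretry(&ts_lock, seq));

		return copy;
	}
]
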
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..2d5d793ef660 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
-typedef struct {
+context_lock_struct(seqlock) {
/*
* Make sure that readers don't starve writers on PREEMPT_RT: use
* seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
*/
seqcount_spinlock_t seqcount;
spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;
#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d3561c4a080e..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -212,7 +212,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) _raw_spin_trylock(lock)
#define raw_spin_lock(lock) _raw_spin_lock(lock)
@@ -283,22 +283,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
} while (0)
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define raw_spin_trylock_bh(lock) \
- __cond_lock(lock, _raw_spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) _raw_spin_trylock_bh(lock)
-#define raw_spin_trylock_irq(lock) \
-({ \
- local_irq_disable(); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_enable(); 0; }); \
-})
+#define raw_spin_trylock_irq(lock) _raw_spin_trylock_irq(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- raw_spin_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
-})
+#define raw_spin_trylock_irqsave(lock, flags) _raw_spin_trylock_irqsave(lock, &(flags))
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
@@ -347,16 +336,19 @@ do { \
#endif
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock(&lock->rlock);
}
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_bh(&lock->rlock);
}
static __always_inline int spin_trylock(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock(&lock->rlock);
}
@@ -364,14 +356,17 @@ static __always_inline int spin_trylock(spinlock_t *lock)
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nested(spinlock_check(lock), subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
raw_spin_lock_irq(&lock->rlock);
}
@@ -379,47 +374,57 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
#define spin_lock_irqsave(lock, flags) \
do { \
raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+ __release(spinlock_check(lock)); __acquire(lock); \
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock(&lock->rlock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_bh(&lock->rlock);
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irq(&lock->rlock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ __releases(lock) __no_context_analysis
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
static __always_inline int spin_trylock_bh(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_bh(&lock->rlock);
}
static __always_inline int spin_trylock_irq(spinlock_t *lock)
+ __cond_acquires(true, lock) __no_context_analysis
{
return raw_spin_trylock_irq(&lock->rlock);
}
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
/**
* spin_is_locked() - Check whether a spinlock is locked.
@@ -497,23 +502,17 @@ static inline int rwlock_needbreak(rwlock_t *lock)
* Decrements @atomic by 1. If the result is 0, returns true and locks
* @lock. Returns false for all other cases.
*/
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))
-extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
-#define atomic_dec_and_raw_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
- unsigned long *flags);
-#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
- __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+ unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
@@ -535,86 +534,144 @@ void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_nested_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_nested, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
raw_spin_lock_bh(_T->lock),
raw_spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irq_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irq_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irq_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
spin_lock_bh(_T->lock),
spin_unlock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
spin_trylock_bh(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_bh_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_bh_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_bh_try, _T)
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave, _T)
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
+
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock, _T)
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irq, _T)
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(read_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_read_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(read_lock_irqsave, _T)
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock, _T)
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irq, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irq, _T)
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
+DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
+
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
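A hedged sketch, not part of this diff, of what the new DECLARE_LOCK_GUARD_1_ATTRS() lines enable under Clang context analysis; struct foo and its members are invented here, and __guarded_by() is the context-analysis annotation used elsewhere in this series:

	struct foo {
		spinlock_t	lock;
		int		counter __guarded_by(&lock);	/* writers must hold lock */
	};

	static void foo_inc(struct foo *f)
	{
		/*
		 * The guard constructor now carries __acquires(), so the
		 * compiler treats f->lock as held until the end of the scope
		 * and accepts the guarded access below without a warning.
		 */
		guard(spinlock)(&f->lock);
		f->counter++;
	}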
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 9ecb0ab504e3..bda5e7a390cd 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -34,8 +34,8 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
__acquires(lock);
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) __cond_acquires(true, lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) __cond_acquires(true, lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
@@ -84,6 +84,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
@@ -94,6 +95,26 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return 0;
}
+static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ local_irq_disable();
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_enable();
+ return false;
+}
+
+static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ local_irq_save(*flags);
+ if (_raw_spin_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
@@ -102,6 +123,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
unsigned long flags;
@@ -113,6 +135,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
local_irq_disable();
preempt_disable();
@@ -121,6 +144,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -128,6 +152,7 @@ static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
+ __acquires(lock) __no_context_analysis
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -137,6 +162,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -145,6 +171,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -153,6 +180,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
}
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -161,6 +189,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
}
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+ __releases(lock)
{
spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
@@ -168,6 +197,7 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
}
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 819aeba1c87e..a9d5c7c66e03 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -24,68 +24,120 @@
* flags straight, to suppress compiler warnings of unused lock
* variables, and to add the proper checker annotations:
*/
-#define ___LOCK(lock) \
+#define ___LOCK_(lock) \
do { __acquire(lock); (void)(lock); } while (0)
-#define __LOCK(lock) \
- do { preempt_disable(); ___LOCK(lock); } while (0)
+#define ___LOCK_shared(lock) \
+ do { __acquire_shared(lock); (void)(lock); } while (0)
-#define __LOCK_BH(lock) \
- do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
+#define __LOCK(lock, ...) \
+ do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQ(lock) \
- do { local_irq_disable(); __LOCK(lock); } while (0)
+#define __LOCK_BH(lock, ...) \
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0)
-#define __LOCK_IRQSAVE(lock, flags) \
- do { local_irq_save(flags); __LOCK(lock); } while (0)
+#define __LOCK_IRQ(lock, ...) \
+ do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0)
-#define ___UNLOCK(lock) \
+#define __LOCK_IRQSAVE(lock, flags, ...) \
+ do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0)
+
+#define ___UNLOCK_(lock) \
do { __release(lock); (void)(lock); } while (0)
-#define __UNLOCK(lock) \
- do { preempt_enable(); ___UNLOCK(lock); } while (0)
+#define ___UNLOCK_shared(lock) \
+ do { __release_shared(lock); (void)(lock); } while (0)
+
+#define __UNLOCK(lock, ...) \
+ do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_BH(lock) \
+#define __UNLOCK_BH(lock, ...) \
do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
- ___UNLOCK(lock); } while (0)
+ ___UNLOCK_##__VA_ARGS__(lock); } while (0)
-#define __UNLOCK_IRQ(lock) \
- do { local_irq_enable(); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQ(lock, ...) \
+ do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
-#define __UNLOCK_IRQRESTORE(lock, flags) \
- do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+#define __UNLOCK_IRQRESTORE(lock, flags, ...) \
+ do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0)
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock, shared)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
-#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock, shared)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock, shared)
#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags, shared)
#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+
+static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_BH(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQ(lock);
+ return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
+static __always_inline int _raw_read_trylock(rwlock_t *lock)
+ __cond_acquires_shared(true, lock)
+{
+ __LOCK(lock, shared);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock(rwlock_t *lock)
+ __cond_acquires(true, lock)
+{
+ __LOCK(lock);
+ return 1;
+}
+
+static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ __LOCK_IRQSAVE(lock, *(flags));
+ return 1;
+}
+
#define _raw_spin_unlock(lock) __UNLOCK(lock)
-#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock, shared)
#define _raw_write_unlock(lock) __UNLOCK(lock)
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock, shared)
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock, shared)
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
#define _raw_read_unlock_irqrestore(lock, flags) \
- __UNLOCK_IRQRESTORE(lock, flags)
+ __UNLOCK_IRQRESTORE(lock, flags, shared)
#define _raw_write_unlock_irqrestore(lock, flags) \
__UNLOCK_IRQRESTORE(lock, flags)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f6499c37157d..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -36,10 +36,11 @@ extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock)
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
-extern int rt_spin_trylock_bh(spinlock_t *lock);
-extern int rt_spin_trylock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
+extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
static __always_inline void spin_lock(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -82,6 +83,7 @@ static __always_inline void spin_lock(spinlock_t *lock)
__spin_lock_irqsave_nested(lock, flags, subclass)
static __always_inline void spin_lock_bh(spinlock_t *lock)
+ __acquires(lock)
{
/* Investigate: Drop bh when blocking ? */
local_bh_disable();
@@ -89,6 +91,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
}
static __always_inline void spin_lock_irq(spinlock_t *lock)
+ __acquires(lock)
{
rt_spin_lock(lock);
}
@@ -101,45 +104,44 @@ static __always_inline void spin_lock_irq(spinlock_t *lock)
} while (0)
static __always_inline void spin_unlock(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_bh(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
local_bh_enable();
}
static __always_inline void spin_unlock_irq(spinlock_t *lock)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
unsigned long flags)
+ __releases(lock)
{
rt_spin_unlock(lock);
}
-#define spin_trylock(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock(lock) rt_spin_trylock(lock)
-#define spin_trylock_bh(lock) \
- __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock) rt_spin_trylock_bh(lock)
-#define spin_trylock_irq(lock) \
- __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) rt_spin_trylock(lock)
-#define spin_trylock_irqsave(lock, flags) \
-({ \
- int __locked; \
- \
- typecheck(unsigned long, flags); \
- flags = 0; \
- __locked = spin_trylock(lock); \
- __locked; \
-})
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock)
+{
+ *flags = 0;
+ return rt_spin_trylock(lock);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
#define spin_is_contended(lock) (((void)(lock), 0))
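On both the PREEMPT_RT and !PREEMPT_RT sides, spin_trylock_irqsave() now expands to a real inline helper that takes &flags, which is what allows it to carry __cond_acquires(true, lock). A caller sketch, reusing the invented struct foo above:

	static bool foo_try_inc(struct foo *f)
	{
		unsigned long flags;

		if (!spin_trylock_irqsave(&f->lock, flags))
			return false;
		f->counter++;			/* lock held only on the true branch */
		spin_unlock_irqrestore(&f->lock, flags);
		return true;
	}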
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 2dfa35ffec76..b65bb6e4451c 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -14,7 +14,7 @@
#ifndef CONFIG_PREEMPT_RT
/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
-typedef struct spinlock {
+context_lock_struct(spinlock) {
union {
struct raw_spinlock rlock;
@@ -26,7 +26,8 @@ typedef struct spinlock {
};
#endif
};
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define ___SPIN_LOCK_INITIALIZER(lockname) \
{ \
@@ -47,12 +48,13 @@ typedef struct spinlock {
/* PREEMPT_RT kernels map spinlock to rt_mutex */
#include <linux/rtmutex.h>
-typedef struct spinlock {
+context_lock_struct(spinlock) {
struct rt_mutex_base lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+};
+typedef struct spinlock spinlock_t;
#define __SPIN_LOCK_UNLOCKED(name) \
{ \
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index 91cb36b65a17..e5644ab2161f 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -11,7 +11,7 @@
#include <linux/lockdep_types.h>
-typedef struct raw_spinlock {
+context_lock_struct(raw_spinlock) {
arch_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
@@ -20,7 +20,8 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} raw_spinlock_t;
+};
+typedef struct raw_spinlock raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 344ad51c8f6c..bb44a0bd7696 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -21,7 +21,7 @@
#include <linux/workqueue.h>
#include <linux/rcu_segcblist.h>
-struct srcu_struct;
+context_lock_struct(srcu_struct, __reentrant_ctx_lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -77,7 +77,7 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
// Flavors requiring synchronize_rcu()
// instead of smp_mb().
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
@@ -131,14 +131,16 @@ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned
}
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
#else
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_read_lock(ssp);
}
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, idx);
}
@@ -210,6 +212,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/*
+ * No-op helper to denote that ssp must be held. Because SRCU-protected pointers
+ * should still be marked with __rcu_guarded, and we do not want to mark them
+ * with __guarded_by(ssp) as it would complicate annotations for writers, we
+ * choose the following strategy: srcu_dereference_check() calls this helper
+ * that checks that the passed ssp is held, and then fake-acquires 'RCU'.
+ */
+static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { }
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -223,9 +233,15 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
- (c) || srcu_read_lock_held(ssp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+({ \
+ __srcu_read_lock_must_hold(ssp); \
+ __acquire_shared_ctx_lock(RCU); \
+ __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu); \
+ __release_shared_ctx_lock(RCU); \
+ __v; \
+})
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -268,7 +284,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
* from another.
*/
-static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -304,7 +321,8 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
* contexts where RCU is watching, that is, from contexts where it would
* be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
*/
-static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -344,7 +362,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *
* complain.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
-__acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -360,7 +378,7 @@ __acquires(ssp)
* See srcu_read_lock_fast() for more information.
*/
static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
- __acquires(ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *retval;
@@ -381,7 +399,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
* and srcu_read_lock_fast(). However, the same definition/initialization
* requirements called out for srcu_read_lock_safe() apply.
*/
-static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
@@ -400,7 +418,8 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
* then none of the other flavors may be used, whether before, during,
* or after.
*/
-static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -412,7 +431,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
+srcu_read_lock_notrace(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int retval;
@@ -443,7 +463,8 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
* which calls to down_read() may be nested. The same srcu_struct may be
* used concurrently by srcu_down_read() and srcu_read_lock().
*/
-static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_down_read(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
WARN_ON_ONCE(in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -458,7 +479,7 @@ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
* Exit an SRCU read-side critical section.
*/
static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -474,7 +495,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
* Exit a light-weight SRCU read-side critical section.
*/
static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
srcu_lock_release(&ssp->dep_map);
@@ -490,7 +511,7 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
* Exit an SRCU-fast-updown read-side critical section.
*/
static inline void
-srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
srcu_lock_release(&ssp->dep_map);
@@ -504,7 +525,7 @@ srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *
* See srcu_read_unlock_fast() for more information.
*/
static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
- struct srcu_ctr __percpu *scp) __releases(ssp)
+ struct srcu_ctr __percpu *scp) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
__srcu_read_unlock_fast(ssp, scp);
@@ -519,7 +540,7 @@ static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
 * the same context as the matching srcu_down_read_fast().
*/
static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
@@ -535,7 +556,7 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
@@ -545,7 +566,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp)
{
srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
@@ -560,7 +581,7 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 * the same context as the matching srcu_down_read().
*/
static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
- __releases(ssp)
+ __releases_shared(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
@@ -600,15 +621,21 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T)
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T)
DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
_T->scp = srcu_read_lock_fast_notrace(_T->lock),
srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T)
#endif
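Combining the shared-acquire annotations with the new guard attributes, an SRCU reader might look like the following sketch (struct cfg, gbl_srcu and gbl_cfg are invented; srcu_dereference() is the existing wrapper around srcu_dereference_check()):

	struct cfg { int limit; };
	DEFINE_SRCU(gbl_srcu);
	static struct cfg __rcu *gbl_cfg;

	static int cfg_read_limit(void)
	{
		/* Shared acquire of gbl_srcu for the rest of the scope. */
		guard(srcu)(&gbl_srcu);
		struct cfg *c = srcu_dereference(gbl_cfg, &gbl_srcu);

		return c ? c->limit : 0;
	}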
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index e0698024667a..dec7cbe015aa 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -73,6 +73,7 @@ void synchronize_srcu(struct srcu_struct *ssp);
* index that must be passed to the matching srcu_read_unlock().
*/
static inline int __srcu_read_lock(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
int idx;
@@ -80,6 +81,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
+ __acquire_shared(ssp);
return idx;
}
@@ -96,22 +98,26 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}
static inline
void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..958cb7ef41cb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -233,7 +233,7 @@ struct srcu_struct {
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
__DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_expedite_current(struct srcu_struct *ssp);
@@ -286,6 +286,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
* implementations of this_cpu_inc().
*/
static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -294,6 +295,7 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -308,7 +310,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
*/
static inline void notrace
__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
@@ -326,6 +330,7 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
*/
static inline
struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+ __acquires_shared(ssp)
{
struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
@@ -334,6 +339,7 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
else
atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
barrier(); /* Avoid leaking the critical section. */
+ __acquire_shared(ssp);
return scp;
}
@@ -348,7 +354,9 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
*/
static inline void notrace
__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases_shared(ssp)
{
+ __release_shared(ssp);
barrier(); /* Avoid leaking the critical section. */
if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 45ff6f7a872b..c47d4b9b88b3 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -44,7 +44,7 @@ struct ww_class {
unsigned int is_wait_die;
};
-struct ww_mutex {
+context_lock_struct(ww_mutex) {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
#ifdef DEBUG_WW_MUTEXES
@@ -52,7 +52,7 @@ struct ww_mutex {
#endif
};
-struct ww_acquire_ctx {
+context_lock_struct(ww_acquire_ctx) {
struct task_struct *task;
unsigned long stamp;
unsigned int acquired;
@@ -141,6 +141,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
+ __acquires(ctx) __no_context_analysis
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
@@ -179,6 +180,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
* data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+ __releases(ctx) __acquires_shared(ctx) __no_context_analysis
{
#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
@@ -196,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ __releases_shared(ctx) __no_context_analysis
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
@@ -245,7 +248,8 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
-extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
+extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -278,7 +282,8 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx);
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
@@ -305,6 +310,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ __acquires(lock) __must_hold(ctx) __no_context_analysis
{
int ret;
#ifdef DEBUG_WW_MUTEXES
@@ -342,6 +348,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
+ __cond_acquires(0, lock) __must_hold(ctx)
{
#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
@@ -349,10 +356,11 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
return ww_mutex_lock_interruptible(lock, ctx);
}
-extern void ww_mutex_unlock(struct ww_mutex *lock);
+extern void ww_mutex_unlock(struct ww_mutex *lock) __releases(lock);
extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx);
+ struct ww_acquire_ctx *ctx)
+ __cond_acquires(true, lock) __must_hold(ctx);
/***
* ww_mutex_destroy - mark a w/w mutex unusable
@@ -363,6 +371,7 @@ extern int __must_check ww_mutex_trylock(struct ww_mutex *lock,
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
+ __must_not_hold(lock)
{
#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);