path: root/include/linux/local_lock_internal.h
author     Linus Torvalds <torvalds@linux-foundation.org>  2025-12-01 19:50:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-12-01 19:50:58 -0800
commit     b53440f8e5a1466870d7a1d255e0f9966e0041fb (patch)
tree       72dd9cb31730d6fd3fea65ca49ba6b29009637b8 /include/linux/local_lock_internal.h
parent     1b5dd29869b1e63f7e5c37d7552e2dcf22de3c26 (diff)
parent     43decb6b628eb033a1b6188e5018773c0d38be1d (diff)
Merge tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Mutexes:

   - Redo __mutex_init() to reduce generated code size (Sebastian Andrzej Siewior)

  Seqlocks:

   - Introduce scoped_seqlock_read() (Peter Zijlstra)
   - Change thread_group_cputime() to use scoped_seqlock_read() (Oleg Nesterov)
   - Change do_task_stat() to use scoped_seqlock_read() (Oleg Nesterov)
   - Change do_io_accounting() to use scoped_seqlock_read() (Oleg Nesterov)
   - Fix the incorrect documentation of read_seqbegin_or_lock() / need_seqretry() (Oleg Nesterov)
   - Allow KASAN to fail optimizing (Peter Zijlstra)

  Local lock updates:

   - Fix all kernel-doc warnings (Randy Dunlap)
   - Add the <linux/local_lock*.h> headers to MAINTAINERS (Sebastian Andrzej Siewior)
   - Reduce the risk of shadowing via s/l/__l/ and s/tl/__tl/ (Vincent Mailhol)

  Lock debugging:

   - spinlock/debug: Fix data-race in do_raw_write_lock (Alexander Sverdlin)

  Atomic primitives infrastructure:

   - atomic: Skip alignment check for try_cmpxchg() old arg (Arnd Bergmann)

  Rust runtime integration:

   - sync: atomic: Enable generated Atomic<T> usage (Boqun Feng)
   - sync: atomic: Implement Debug for Atomic<Debug> (Boqun Feng)
   - debugfs: Remove Rust native atomics and replace them with Linux versions (Boqun Feng)
   - debugfs: Implement Reader for Mutex<T> only when T is Unpin (Boqun Feng)
   - lock: guard: Add T: Unpin bound to DerefMut (Daniel Almeida)
   - lock: Pin the inner data (Daniel Almeida)
   - lock: Add a Pin<&mut T> accessor (Daniel Almeida)"

* tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/local_lock: Fix all kernel-doc warnings
  locking/local_lock: s/l/__l/ and s/tl/__tl/ to reduce the risk of shadowing
  locking/local_lock: Add the <linux/local_lock*.h> headers to MAINTAINERS
  locking/mutex: Redo __mutex_init() to reduce generated code size
  rust: debugfs: Replace the usage of Rust native atomics
  rust: sync: atomic: Implement Debug for Atomic<Debug>
  rust: sync: atomic: Make Atomic*Ops pub(crate)
  seqlock: Allow KASAN to fail optimizing
  rust: debugfs: Implement Reader for Mutex<T> only when T is Unpin
  seqlock: Change do_io_accounting() to use scoped_seqlock_read()
  seqlock: Change do_task_stat() to use scoped_seqlock_read()
  seqlock: Change thread_group_cputime() to use scoped_seqlock_read()
  seqlock: Introduce scoped_seqlock_read()
  documentation: seqlock: fix the wrong documentation of read_seqbegin_or_lock/need_seqretry
  atomic: Skip alignment check for try_cmpxchg() old arg
  rust: lock: Add a Pin<&mut T> accessor
  rust: lock: Pin the inner data
  rust: lock: guard: Add T: Unpin bound to DerefMut
  locking/spinlock/debug: Fix data-race in do_raw_write_lock
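The local_lock change by Vincent Mailhol renames the macros' internal temporaries from l/tl to __l/__tl because a macro-local variable can shadow an identically named variable at the call site and silently rebind the macro's argument to the wrong object. A minimal sketch of that hazard, using hypothetical bump_bad()/bump_good() macros rather than the kernel's:

#include <stdio.h>

/* BAD: the temporary uses the short name "l" that a caller might also use. */
#define bump_bad(ptr)            \
do {                             \
        int *l;                  \
        l = (ptr);               \
        *l += 1;                 \
} while (0)

/* BETTER: a "__"-prefixed temporary is far less likely to collide. */
#define bump_good(ptr)           \
do {                             \
        int *__l;                \
        __l = (ptr);             \
        *__l += 1;               \
} while (0)

int main(void)
{
        int val = 41;
        int *l = &val;           /* caller happens to name its pointer "l"      */

        /* bump_bad(l) would expand to "int *l; l = (l); ...", i.e. the argument */
        /* now names the macro's own uninitialized temporary, and the dereference */
        /* is undefined behaviour.                                               */
        bump_good(l);            /* "__l" does not collide with the caller's "l" */
        printf("%d\n", val);     /* prints 42 */
        return 0;
}

The double-underscore prefix does not make the macros truly hygienic, but it keeps the temporaries out of the namespace callers realistically use, which is the trade-off the diff below makes.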
Diffstat (limited to 'include/linux/local_lock_internal.h')
-rw-r--r--  include/linux/local_lock_internal.h | 62
1 file changed, 31 insertions(+), 31 deletions(-)
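The hunks below dispatch on the lock's static type with C11 _Generic, so the trylock bookkeeping is only selected for local_trylock_t pointers. A standalone sketch of that pattern, with made-up plainlock_t/trylock_t types and a hypothetical mark_acquired() macro rather than the kernel's (it relies on GNU statement expressions, as the kernel code does):

#include <stdio.h>

/* Hypothetical stand-ins for local_lock_t / local_trylock_t. */
typedef struct { int dummy;    } plainlock_t;
typedef struct { int acquired; } trylock_t;

/*
 * Same shape as the patched __local_lock_acquire(): cast once into a
 * "__"-prefixed temporary, then let _Generic decide whether the trylock
 * bookkeeping runs.
 */
#define mark_acquired(lock)                                     \
do {                                                            \
        trylock_t *__tl = (trylock_t *)(lock);                  \
        _Generic((lock),                                        \
                 trylock_t *:   ({ __tl->acquired = 1; }),      \
                 plainlock_t *: (void)0);                       \
} while (0)

int main(void)
{
        plainlock_t pl = { 0 };
        trylock_t   tl = { 0 };

        mark_acquired(&pl);      /* selects the no-op branch                 */
        mark_acquired(&tl);      /* selects the branch that sets the flag    */
        printf("%d %d\n", pl.dummy, tl.acquired);   /* prints "0 1" */
        return 0;
}

Because _Generic only selects and never evaluates the non-matching branch, the plain-lock case carries no run-time cost; the kernel macros wrap the same construct with local_lock_acquire()/local_lock_release() and lockdep assertions.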
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index a4dc479157b5..8f82b4eb542f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -99,18 +99,18 @@ do { \
#define __local_lock_acquire(lock) \
do { \
- local_trylock_t *tl; \
- local_lock_t *l; \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
\
- l = (local_lock_t *)(lock); \
- tl = (local_trylock_t *)l; \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
_Generic((lock), \
local_trylock_t *: ({ \
- lockdep_assert(tl->acquired == 0); \
- WRITE_ONCE(tl->acquired, 1); \
+ lockdep_assert(__tl->acquired == 0); \
+ WRITE_ONCE(__tl->acquired, 1); \
}), \
local_lock_t *: (void)0); \
- local_lock_acquire(l); \
+ local_lock_acquire(__l); \
} while (0)
#define __local_lock(lock) \
@@ -133,36 +133,36 @@ do { \
#define __local_trylock(lock) \
({ \
- local_trylock_t *tl; \
+ local_trylock_t *__tl; \
\
preempt_disable(); \
- tl = (lock); \
- if (READ_ONCE(tl->acquired)) { \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
preempt_enable(); \
- tl = NULL; \
+ __tl = NULL; \
} else { \
- WRITE_ONCE(tl->acquired, 1); \
+ WRITE_ONCE(__tl->acquired, 1); \
local_trylock_acquire( \
- (local_lock_t *)tl); \
+ (local_lock_t *)__tl); \
} \
- !!tl; \
+ !!__tl; \
})
#define __local_trylock_irqsave(lock, flags) \
({ \
- local_trylock_t *tl; \
+ local_trylock_t *__tl; \
\
local_irq_save(flags); \
- tl = (lock); \
- if (READ_ONCE(tl->acquired)) { \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
local_irq_restore(flags); \
- tl = NULL; \
+ __tl = NULL; \
} else { \
- WRITE_ONCE(tl->acquired, 1); \
+ WRITE_ONCE(__tl->acquired, 1); \
local_trylock_acquire( \
- (local_lock_t *)tl); \
+ (local_lock_t *)__tl); \
} \
- !!tl; \
+ !!__tl; \
})
/* preemption or migration must be disabled before calling __local_lock_is_locked */
@@ -170,16 +170,16 @@ do { \
#define __local_lock_release(lock) \
do { \
- local_trylock_t *tl; \
- local_lock_t *l; \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
\
- l = (local_lock_t *)(lock); \
- tl = (local_trylock_t *)l; \
- local_lock_release(l); \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ local_lock_release(__l); \
_Generic((lock), \
local_trylock_t *: ({ \
- lockdep_assert(tl->acquired == 1); \
- WRITE_ONCE(tl->acquired, 0); \
+ lockdep_assert(__tl->acquired == 1); \
+ WRITE_ONCE(__tl->acquired, 0); \
}), \
local_lock_t *: (void)0); \
} while (0)
@@ -223,12 +223,12 @@ typedef spinlock_t local_trylock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define __local_lock_init(l) \
+#define __local_lock_init(__l) \
do { \
- local_spin_lock_init((l)); \
+ local_spin_lock_init((__l)); \
} while (0)
-#define __local_trylock_init(l) __local_lock_init(l)
+#define __local_trylock_init(__l) __local_lock_init(__l)
#define __local_lock(__lock) \
do { \