From cc39f3872c0865bef992b713338df369554fa9e0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 9 Oct 2025 22:11:54 +0200
Subject: seqlock: Introduce scoped_seqlock_read()

The read_seqbegin/need_seqretry/done_seqretry API is cumbersome and
error-prone. With the new helper, "typical" code like:

	int seq, nextseq;
	unsigned long flags;

	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&seqlock, &seq);
		// read-side critical section
		nextseq = 1;
	} while (need_seqretry(&seqlock, seq));
	done_seqretry_irqrestore(&seqlock, seq, flags);

can be rewritten as:

	scoped_seqlock_read (&seqlock, ss_lock_irqsave) {
		// read-side critical section
	}

Original idea by Oleg Nesterov; with contributions from Linus.

Originally-by: Oleg Nesterov
Signed-off-by: Peter Zijlstra (Intel)
---
 include/linux/seqlock.h | 111 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48eab7a2a..b7bcc4111e90 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1209,4 +1209,115 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
 }
+
+enum ss_state {
+	ss_done = 0,
+	ss_lock,
+	ss_lock_irqsave,
+	ss_lockless,
+};
+
+struct ss_tmp {
+	enum ss_state state;
+	unsigned long data;
+	spinlock_t *lock;
+	spinlock_t *lock_irqsave;
+};
+
+static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+	if (sst->lock)
+		spin_unlock(sst->lock);
+	if (sst->lock_irqsave)
+		spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+	switch (sst->state) {
+	case ss_done:
+		__scoped_seqlock_bug();
+		return;
+
+	case ss_lock:
+	case ss_lock_irqsave:
+		sst->state = ss_done;
+		return;
+
+	case ss_lockless:
+		if (!read_seqretry(lock, sst->data)) {
+			sst->state = ss_done;
+			return;
+		}
+		break;
+	}
+
+	switch (target) {
+	case ss_done:
+		__scoped_seqlock_invalid_target();
+		return;
+
+	case ss_lock:
+		sst->lock = &lock->lock;
+		spin_lock(sst->lock);
+		sst->state = ss_lock;
+		return;
+
+	case ss_lock_irqsave:
+		sst->lock_irqsave = &lock->lock;
+		spin_lock_irqsave(sst->lock_irqsave, sst->data);
+		sst->state = ss_lock_irqsave;
+		return;
+
+	case ss_lockless:
+		sst->data = read_seqbegin(lock);
+		return;
+	}
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s)			\
+	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =	\
+	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) };	\
+	     _s.state != ss_done;					\
+	     __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ *					  section without manual sequence
+ *					  counter handling or calls to other
+ *					  helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ *	      the type of critical read section
+ *
+ * Example:
+ *
+ *	scoped_seqlock_read (&lock, ss_lock) {
+ *		// read-side critical section
+ *	}
+ *
+ * Starts with a lockless pass. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target)				\
+	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
 #endif /* __LINUX_SEQLOCK_H */
--
cgit v1.2.3
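For illustration, here is a minimal usage sketch of the new helper. The structure, its fields and the writer below are hypothetical, invented for this example; only the scoped_seqlock_read() usage itself comes from the patch above.

	#include <linux/seqlock.h>

	/* Hypothetical data protected by a seqlock. */
	struct stats {
		seqlock_t lock;
		unsigned long sum;
		unsigned long nr;
	};

	/* Writer side: serialize writers and bump the sequence count. */
	static void stats_add(struct stats *s, unsigned long val)
	{
		write_seqlock(&s->lock);
		s->sum += val;
		s->nr++;
		write_sequnlock(&s->lock);
	}

	/*
	 * Reader side: lockless first pass; if a writer raced with us,
	 * the body re-runs once with s->lock.lock held.
	 */
	static unsigned long stats_avg(struct stats *s)
	{
		unsigned long avg = 0;

		scoped_seqlock_read (&s->lock, ss_lock) {
			if (s->nr)
				avg = s->sum / s->nr;
		}
		return avg;
	}

Because the unlock lives in the __cleanup() handler, returning or breaking out of the scope still releases a lock taken for the retry pass.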
From b94d45b6bbb42571ec225d3be0e7457c8765a5b4 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 28 Oct 2025 09:56:38 +0100
Subject: seqlock: Allow KASAN to fail optimizing

Some KASAN builds fail to properly optimize this code -- luckily we
don't care about code quality for KASAN builds, so just exclude it.

Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
Closes: https://lore.kernel.org/oe-kbuild-all/202510251641.idrNXhv5-lkp@intel.com/
---
 include/linux/seqlock.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index b7bcc4111e90..a8a8661839b6 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1234,11 +1234,14 @@ static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
 
 extern void __scoped_seqlock_invalid_target(void);
 
-#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000) || defined(CONFIG_KASAN)
 /*
  * For some reason some GCC-8 architectures (nios2, alpha) have trouble
  * determining that the ss_done state is impossible in __scoped_seqlock_next()
  * below.
+ *
+ * Similarly KASAN is known to confuse compilers enough to break this. But we
+ * don't care about code quality for KASAN builds anyway.
  */
 static inline void __scoped_seqlock_bug(void) { }
 #else
--
cgit v1.2.3
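An aside on the canary being disabled above: __scoped_seqlock_bug() is declared but never defined, so any call the optimizer fails to prove dead survives into the object file and turns into an undefined-reference link error, rather than silently shipping sub-optimal code. A standalone sketch of the same link-time canary pattern, with entirely hypothetical names:

	/* Never defined anywhere: a surviving call breaks the final link. */
	extern void never_reached(void);

	static inline int clamp_positive(int x)
	{
		if (x >= 0)
			return x;
		if (x < 0)
			return 0;
		/*
		 * Unreachable: the two tests above are exhaustive. If the
		 * compiler cannot prove that, the link fails loudly here
		 * instead of quietly keeping the dead branch.
		 */
		never_reached();
		return -1;
	}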
From 90dfeef1cd38dff19f8b3a752d13bfd79f0f7694 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 4 Dec 2025 11:43:32 +0100
Subject: seqlock: Cure some more scoped_seqlock() optimization fails

Arnd reported that an x86 randconfig build using gcc-15 tripped over
__scoped_seqlock_bug().

It turns out GCC chose not to inline the scoped_seqlock helper
functions, and as such was not able to optimize them properly.

[ mingo: Clang fails the build too in some circumstances. ]

Reported-by: Arnd Bergmann
Tested-by: Arnd Bergmann
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Cc: Oleg Nesterov
Link: https://patch.msgid.link/20251204104332.GG2528459@noisy.programming.kicks-ass.net
---
 include/linux/seqlock.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index a8a8661839b6..221123660e71 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1224,7 +1224,7 @@ struct ss_tmp {
 	spinlock_t *lock_irqsave;
 };
 
-static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
 {
 	if (sst->lock)
 		spin_unlock(sst->lock);
@@ -1252,7 +1252,7 @@ static inline void __scoped_seqlock_bug(void) { }
 extern void __scoped_seqlock_bug(void);
 #endif
 
-static inline void
+static __always_inline void
 __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
 {
 	switch (sst->state) {
--
cgit v1.2.3
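A closing note on why the forced inlining matters: scoped_seqlock_read() passes its ss_state target as a compile-time constant, and only while __scoped_seqlock_next() is inlined into the loop can the compiler fold the switch on that constant and prove the ss_done arm -- and with it the __scoped_seqlock_bug() call -- dead. A self-contained illustration of the same effect, with hypothetical names (__always_inline comes from <linux/compiler_types.h>):

	extern void dead_end(void);	/* link-time canary, never defined */

	static __always_inline int dispatch(int mode, int x)
	{
		switch (mode) {
		case 0: return x + 1;
		case 1: return x - 1;
		}
		dead_end();	/* dead whenever 'mode' is a visible constant */
		return 0;
	}

	static int bump(int x)
	{
		/*
		 * Inlined, this folds to 'return x + 1;' and dead_end() is
		 * eliminated. Compiled out-of-line, 'mode' is no longer a
		 * visible constant and the canary call survives -- which is
		 * exactly what the gcc-15 randconfig build tripped over.
		 */
		return dispatch(0, x);
	}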