From 8c9c8566e139c0f1398245fbe3aa409fc1a79da8 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Fri, 19 Dec 2025 16:40:07 +0100
Subject: locking/local_lock: Include missing headers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Including <linux/local_lock.h> into an empty TU will result in the
compiler complaining:

  ./include/linux/local_lock.h: In function ‘class_local_lock_irqsave_constructor’:
  ./include/linux/local_lock_internal.h:95:17: error: implicit declaration of function ‘local_irq_save’; <...>
     95 |         local_irq_save(flags);                                 \
        |         ^~~~~~~~~~~~~~

As well as (some architectures only, such as 'sh'):

  ./include/linux/local_lock_internal.h: In function ‘local_lock_acquire’:
  ./include/linux/local_lock_internal.h:33:20: error: ‘current’ undeclared (first use in this function)
     33 |         l->owner = current;

Include missing headers to allow including local_lock.h where the
required headers are not otherwise included.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://patch.msgid.link/20251219154418.3592607-19-elver@google.com
---
 include/linux/local_lock_internal.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux/local_lock_internal.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8f82b4eb542f..1a1ea1232add 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -4,7 +4,9 @@
 #endif

 #include
+#include
 #include
+#include

 #ifndef CONFIG_PREEMPT_RT
--
cgit v1.2.3
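For illustration only (the file below is hypothetical and not part of this
series), the failure mode described above amounts to building a translation
unit that contains nothing but the include:

  /* hypothetical empty TU used to reproduce the build errors quoted above */
  #include <linux/local_lock.h>
  /*
   * Nothing else is included: before this fix, the guard constructors
   * emitted by this header referenced local_irq_save() and current without
   * any prior declaration being visible in such a TU; the two added
   * includes provide those declarations.
   */
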
From d3febf16dee28a74b01ba43195ee4965edb6208f Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Fri, 19 Dec 2025 16:40:08 +0100
Subject: locking/local_lock: Support Clang's context analysis

Add support for Clang's context analysis for local_lock_t and
local_trylock_t.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-20-elver@google.com
---
 include/linux/local_lock_internal.h | 71 +++++++++++++++++++++++++++++--------
 1 file changed, 57 insertions(+), 14 deletions(-)

(limited to 'include/linux/local_lock_internal.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 1a1ea1232add..e8c4803d8db4 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -10,21 +10,23 @@

 #ifndef CONFIG_PREEMPT_RT

-typedef struct {
+context_lock_struct(local_lock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
-} local_lock_t;
+};
+typedef struct local_lock local_lock_t;

 /* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
-typedef struct {
+context_lock_struct(local_trylock) {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 	struct task_struct	*owner;
 #endif
 	u8	acquired;
-} local_trylock_t;
+};
+typedef struct local_trylock local_trylock_t;

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)			\
@@ -84,9 +86,14 @@ do {								\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_PERCPU);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)

-#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+#define __local_trylock_init(lock)				\
+do {								\
+	__local_lock_init((local_lock_t *)lock);		\
+	__assume_ctx_lock(lock);				\
+} while (0)

 #define __spinlock_nested_bh_init(lock)				\
 do {								\
@@ -97,6 +104,7 @@ do {								\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_NORMAL);			\
 	local_lock_debug_init(lock);				\
+	__assume_ctx_lock(lock);				\
 } while (0)

 #define __local_lock_acquire(lock)				\
@@ -119,22 +127,25 @@ do {								\
 do {								\
 	preempt_disable();					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)

 #define __local_lock_irq(lock)					\
 do {								\
 	local_irq_disable();					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)

 #define __local_lock_irqsave(lock, flags)			\
 do {								\
 	local_irq_save(flags);					\
 	__local_lock_acquire(lock);				\
+	__acquire(lock);					\
 } while (0)

 #define __local_trylock(lock)					\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		local_trylock_t *__tl;				\
 								\
 		preempt_disable();				\
@@ -148,10 +159,10 @@ do {								\
 				(local_lock_t *)__tl);		\
 		}						\
 		!!__tl;						\
-	})
+	}))

 #define __local_trylock_irqsave(lock, flags)			\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		local_trylock_t *__tl;				\
 								\
 		local_irq_save(flags);				\
@@ -165,7 +176,7 @@ do {								\
 				(local_lock_t *)__tl);		\
 		}						\
 		!!__tl;						\
-	})
+	}))

 /* preemption or migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(lock)	READ_ONCE(this_cpu_ptr(lock)->acquired)
@@ -188,18 +199,21 @@ do {								\
 #define __local_unlock(lock)					\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	preempt_enable();					\
 } while (0)

 #define __local_unlock_irq(lock)				\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	local_irq_enable();					\
 } while (0)

 #define __local_unlock_irqrestore(lock, flags)			\
 do {								\
+	__release(lock);					\
 	__local_lock_release(lock);				\
 	local_irq_restore(flags);				\
 } while (0)
@@ -208,13 +222,19 @@ do {								\
 do {								\
 	lockdep_assert_in_softirq();				\
 	local_lock_acquire((lock));				\
+	__acquire(lock);					\
 } while (0)

 #define __local_unlock_nested_bh(lock)				\
-	local_lock_release((lock))
+	do {							\
+		__release(lock);				\
+		local_lock_release((lock));			\
+	} while (0)

 #else /* !CONFIG_PREEMPT_RT */

+#include
+
 /*
  * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
  * critical section while staying preemptible.
@@ -269,7 +289,7 @@ do {								\
 } while (0)

 #define __local_trylock(lock)					\
-	({							\
+	__try_acquire_ctx_lock(lock, context_unsafe(({		\
 		int __locked;					\
 								\
 		if (in_nmi() | in_hardirq()) {			\
@@ -281,17 +301,40 @@ do {								\
 			migrate_enable();			\
 		}						\
 		__locked;					\
-	})
+	})))

 #define __local_trylock_irqsave(lock, flags)			\
-	({							\
+	__try_acquire_ctx_lock(lock, ({				\
 		typecheck(unsigned long, flags);		\
 		flags = 0;					\
 		__local_trylock(lock);				\
-	})
+	}))

 /* migration must be disabled before calling __local_lock_is_locked */
 #define __local_lock_is_locked(__lock)				\
 	(rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)

 #endif /* CONFIG_PREEMPT_RT */
+
+#if defined(WARN_CONTEXT_ANALYSIS)
+/*
+ * Because the compiler only knows about the base per-CPU variable, use this
+ * helper function to make the compiler think we lock/unlock the @base variable,
+ * and hide the fact we actually pass the per-CPU instance to lock/unlock
+ * functions.
+ */
+static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#ifndef CONFIG_PREEMPT_RT
+static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
+	__returns_ctx_lock(base) __attribute__((overloadable))
+{
+	return this_cpu_ptr(base);
+}
+#endif /* CONFIG_PREEMPT_RT */
+#else /* WARN_CONTEXT_ANALYSIS */
+#define __this_cpu_local_lock(base)	this_cpu_ptr(base)
+#endif /* WARN_CONTEXT_ANALYSIS */
--
cgit v1.2.3
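To make concrete what these annotations buy, a minimal sketch of a user is
shown below. It is illustrative only: the structure, per-CPU variable and
function names are invented, and the only assumption taken from the patch is
that local_lock()/local_unlock() are now visible to Clang's context analysis
as acquiring and releasing the lock's context.

  struct net_stats_pcpu {
  	local_lock_t	lock;
  	u64		packets;	/* intended to be accessed under @lock */
  };
  static DEFINE_PER_CPU(struct net_stats_pcpu, net_stats_pcpu) = {
  	.lock = INIT_LOCAL_LOCK(lock),
  };

  static void net_stats_inc(void)
  {
  	local_lock(&net_stats_pcpu.lock);		/* context acquired */
  	this_cpu_ptr(&net_stats_pcpu)->packets++;
  	local_unlock(&net_stats_pcpu.lock);		/* context released */
  }

With the analysis enabled (WARN_CONTEXT_ANALYSIS), dropping the final
local_unlock() above, or unlocking a lock that was never acquired, is
expected to be reported at compile time instead of relying solely on lockdep
at runtime.
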
From a45026cef17d1080c985adf28234d6c8475ad66f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 8 Jan 2026 11:14:24 +0100
Subject: locking/local_lock: Include more missing headers

Ingo reported that PREEMPT_RT=y builds fail to build lib/test_context-analysis.c.

Reported-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/local_lock_internal.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/local_lock_internal.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index e8c4803d8db4..7843ab9059c2 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -233,6 +233,7 @@ do {								\

 #else /* !CONFIG_PREEMPT_RT */

+#include
 #include

 /*
--
cgit v1.2.3
From d084a73714f818ce509022e1aa9483cabf797c16 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Mon, 19 Jan 2026 10:05:52 +0100
Subject: compiler-context-analysis: Introduce scoped init guards

Add scoped init guard definitions for common synchronization primitives
supported by context analysis.

The scoped init guards treat the context as active within the
initialization scope of the underlying context lock, given that
initialization implies exclusive access to the underlying object. This
allows initialization of guarded members without disabling context
analysis, while documenting initialization as distinct from subsequent
usage.

The documentation is updated with the new recommendation.

Where scoped init guards are not provided or cannot be implemented
(ww_mutex omitted for lack of multi-arg guard initializers), the
alternative is to just disable context analysis where guarded members
are initialized.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20251212095943.GM3911114@noisy.programming.kicks-ass.net/
Link: https://patch.msgid.link/20260119094029.1344361-3-elver@google.com
---
 include/linux/local_lock_internal.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/local_lock_internal.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 7843ab9059c2..ed2f3fb4c360 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include

 #ifndef CONFIG_PREEMPT_RT
--
cgit v1.2.3

From b682b70d016f6aee20d91dcbaa319a932008a83a Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Mon, 19 Jan 2026 10:05:56 +0100
Subject: compiler-context-analysis: Remove __assume_ctx_lock from initializers

Remove __assume_ctx_lock() from lock initializers. Implicitly asserting
an active context during initialization caused false-positive
double-lock errors when acquiring a lock immediately after its
initialization.

Moving forward, guarded member initialization must either:

 1. Use guard(type_init)(&lock) or scoped_guard(type_init, ...).
 2. Use context_unsafe() for simple initialization.

Reported-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/57062131-e79e-42c2-aa0b-8f931cb8cac2@acm.org/
Link: https://patch.msgid.link/20260119094029.1344361-7-elver@google.com
---
 include/linux/local_lock_internal.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'include/linux/local_lock_internal.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index ed2f3fb4c360..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -87,13 +87,11 @@ do {								\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_PERCPU);			\
 	local_lock_debug_init(lock);				\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __local_trylock_init(lock)				\
 do {								\
 	__local_lock_init((local_lock_t *)lock);		\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __spinlock_nested_bh_init(lock)				\
@@ -105,7 +103,6 @@ do {								\
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_NORMAL);			\
 	local_lock_debug_init(lock);				\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __local_lock_acquire(lock)				\
--
cgit v1.2.3
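As a rough sketch of the two options listed in the last patch above (the
structure, field and function names are invented and the guarded-by
annotation is elided; only context_unsafe() and the guard(..._init) /
scoped_guard(..._init) pattern come from the patches themselves):

  struct rx_stats_pcpu {
  	local_lock_t	lock;
  	unsigned long	dropped;	/* guarded by @lock; annotation omitted here */
  };
  static DEFINE_PER_CPU(struct rx_stats_pcpu, rx_stats_pcpu);

  static void rx_stats_init(void)
  {
  	int cpu;

  	for_each_possible_cpu(cpu) {
  		struct rx_stats_pcpu *rs = per_cpu_ptr(&rx_stats_pcpu, cpu);

  		local_lock_init(&rs->lock);
  		/*
  		 * Option 2: a simple guarded-member initialization with the
  		 * analysis explicitly opted out.  Option 1 would instead wrap
  		 * the member writes in the per-type scoped init guard.
  		 */
  		context_unsafe(rs->dropped = 0);
  	}
  }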