From 14d72d4b6f0e88b5f683c1a5b7a876a55055852d Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Jun 2023 08:00:59 +0100
Subject: locking/atomic: remove fallback comments

Currently a subset of the fallback templates have kerneldoc comments,
resulting in a haphazard set of generated kerneldoc comments as only
some operations have fallback templates to begin with.

We'd like to generate more consistent kerneldoc comments, and to do so
we'll need to restructure the way the fallback code is generated.

To minimize churn and to make it easier to restructure the fallback
code, this patch removes the existing kerneldoc comments from the
fallback templates. We can add new kerneldoc comments in subsequent
patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kees Cook
Link: https://lore.kernel.org/r/20230605070124.3741859-3-mark.rutland@arm.com
---
 scripts/atomic/fallbacks/add_unless | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'scripts/atomic/fallbacks/add_unless')

diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index 9e5159c2ccfc..cf79b9da38db 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -1,13 +1,4 @@
 cat << EOF
-/**
- * arch_${atomic}_add_unless - add unless the number is already a given value
- * @v: pointer of type ${atomic}_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
 static __always_inline bool
 arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
--
cgit v1.2.3
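For reference, instantiated with ${atomic}=atomic and ${int}=int, the
template above expanded to the following before this patch (a sketch:
the function body is taken from the next patch's diff context, and the
surrounding ifdeffery of the generated header is omitted):

| /**
|  * arch_atomic_add_unless - add unless the number is already a given value
|  * @v: pointer of type atomic_t
|  * @a: the amount to add to v...
|  * @u: ...unless v is equal to u.
|  *
|  * Atomically adds @a to @v, if @v was not already @u.
|  * Returns true if the addition was done.
|  */
| static __always_inline bool
| arch_atomic_add_unless(atomic_t *v, int a, int u)
| {
| 	return arch_atomic_fetch_add_unless(v, a, u) != u;
| }

After this patch only the uncommented definition remains; consistent
kerneldoc is added back at a single location by later patches in the
series.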
From 9257959a6e5b4fca6fc8e985790bff62c2046f20 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Jun 2023 08:01:17 +0100
Subject: locking/atomic: scripts: restructure fallback ifdeffery

Currently the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed ordering variants
with some shared ifdeffery and several potential definitions of each
ordering variant in different branches of the shared ifdeffery.

As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find a relevant definition, and we don't have a good location
to place anything common to all definitions of an ordering variant
(e.g. kerneldoc).

Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback with
redundant barriers, and avoiding that would otherwise require a lot of
baroque ifdeffery.

With recent changes we no longer need to fill in the missing atomics in
the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.

Restructure the fallbacks in this way, with each ordering variant
having its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering
variant, we'll define the operation in terms of a distinct
raw_atomic*_<otherop>(), as this itself might have been filled in with
a fallback.

As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers, so they are removed.

This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kees Cook
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
---
 scripts/atomic/fallbacks/add_unless | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'scripts/atomic/fallbacks/add_unless')

diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index cf79b9da38db..88593e28b163 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -1,7 +1,7 @@
 cat << EOF
 static __always_inline bool
-arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
+	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
--
cgit v1.2.3
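Applied to the add_unless template shown in the diff above, this
restructuring gives the generated raw_atomic_add_unless() roughly the
following shape (a sketch, instantiated for atomic_t/int; add_unless
has no ordering variants, so a single #ifdef on the arch_ definition
suffices):

| #if defined(arch_atomic_add_unless)
| #define raw_atomic_add_unless arch_atomic_add_unless
| #else
| static __always_inline bool
| raw_atomic_add_unless(atomic_t *v, int a, int u)
| {
| 	return raw_atomic_fetch_add_unless(v, a, u) != u;
| }
| #endif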
From 1d78814d41701c216e28fcf2656526146dec4a1a Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 5 Jun 2023 08:01:20 +0100
Subject: locking/atomic: scripts: simplify raw_atomic*() definitions

Currently each ordering variant has several potential definitions,
with a mixture of preprocessor and C definitions, including several
copies of its C prototype, e.g.

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Make this a bit simpler by defining the C prototype once, and writing
the various potential definitions as plain C code guarded by
ifdeffery. For example, the above becomes:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_fetch_andnot)
| 	return arch_atomic_fetch_andnot(i, v);
| #else
| 	return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }

Which is far easier to read. As we now always have a single copy of the
C prototype wrapping all the potential definitions, we now have an
obvious single location for kerneldoc comments.

At the same time, the fallbacks for raw_atomic*_xchg() are made to use
'new' rather than 'i' as the name of the new value. This is what the
existing fallback template used, and is more consistent with the
raw_atomic{_try,}cmpxchg() fallbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Kees Cook
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
---
 scripts/atomic/fallbacks/add_unless | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'scripts/atomic/fallbacks/add_unless')

diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index 88593e28b163..95ecb2b7405b 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -1,7 +1,3 @@
 cat << EOF
-static __always_inline bool
-raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
-{
 	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
-}
 EOF
--
cgit v1.2.3
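Taken together, after this patch the generated raw_atomic_add_unless()
has a single prototype wrapping the ifdeffery, roughly as follows (a
sketch of the generated header, instantiated for atomic_t/int; the
add_unless template now supplies only the final fallback body):

| static __always_inline bool
| raw_atomic_add_unless(atomic_t *v, int a, int u)
| {
| #if defined(arch_atomic_add_unless)
| 	return arch_atomic_add_unless(v, a, u);
| #else
| 	return raw_atomic_fetch_add_unless(v, a, u) != u;
| #endif
| }

This single copy of the prototype is also the obvious place for the
kerneldoc comments the series goes on to add.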