From 765dcd209947e7b3666c08fb109ab8b879f7a471 Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Tue, 26 Nov 2019 15:04:05 +0100
Subject: asm-generic/atomic: Use __always_inline for fallback wrappers

Use __always_inline for atomic fallback wrappers. When building for size
(CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to
inline even relatively small static inline functions that are assumed to
be inlinable, such as atomic ops. This can cause problems, for example in
UACCESS regions.

While the fallback wrappers aren't pure wrappers, they are trivial
nonetheless, and the function they wrap should determine the final
inlining policy.

For x86 tinyconfig we observe:
- vmlinux baseline: 1315988
- vmlinux with patch: 1315928 (-60 bytes)

[ tglx: Cherry-picked from KCSAN ]

Suggested-by: Mark Rutland
Signed-off-by: Marco Elver
Acked-by: Mark Rutland
Signed-off-by: Paul E. McKenney
Signed-off-by: Thomas Gleixner
---
 scripts/atomic/fallbacks/fetch_add_unless | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts/atomic/fallbacks/fetch_add_unless')

diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index d2c091db7eae..fffbc0d16fdf 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns original value of @v
  */
-static inline ${int}
+static __always_inline ${int}
 ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	${int} c = ${atomic}_read(v);
--
cgit v1.2.3
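For illustration, expanding the template changed above for the plain
int-sized case (assuming the generator substitutes ${atomic} -> atomic and
${int} -> int; the function names come from the template, the substitution
values are only the usual generic case), the fallback emitted after this
patch looks roughly like:

  /* Sketch of the generated fallback, assuming ${atomic}=atomic, ${int}=int. */
  static __always_inline int
  atomic_fetch_add_unless(atomic_t *v, int a, int u)
  {
          int c = atomic_read(v);

          do {
                  if (unlikely(c == u))
                          break;
          } while (!atomic_try_cmpxchg(v, &c, c + a));

          return c;
  }

With __always_inline, the final inlining decision is effectively deferred to
the atomic_read()/atomic_try_cmpxchg() implementations being wrapped, even
under CC_OPTIMIZE_FOR_SIZE.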
From 37f8173dd84936ea78000ed1cad24f8b18d48ebb Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Jan 2020 22:13:03 +0100
Subject: locking/atomics: Flip fallbacks and instrumentation

Currently instrumentation of atomic primitives is done at the architecture
level, while composites or fallbacks are provided at the generic level.

The result is that there are no uninstrumented variants of the fallbacks.
Since there is now a need for such variants to isolate text poke from any
form of instrumentation, invert this ordering.

Doing this means moving the instrumentation into the generic code as well
as having (for now) two variants of the fallbacks.

Notes:

 - the various *cond_read* primitives are not proper fallbacks and got
   moved into linux/atomic.c. No arch_ variants are generated because the
   base primitives smp_cond_load*() are instrumented.

 - once all architectures are moved over to arch_atomic_, one of the
   fallback variants can be removed and some 2300 lines reclaimed.

 - atomic_{read,set}*() are no longer double-instrumented.

Reported-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Acked-by: Mark Rutland
Link: https://lkml.kernel.org/r/20200505134058.769149955@linutronix.de
---
 scripts/atomic/fallbacks/fetch_add_unless | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'scripts/atomic/fallbacks/fetch_add_unless')

diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index fffbc0d16fdf..0e0b9aef1515 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${atomic}_fetch_add_unless - add unless the number is already a given value
+ * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
  * Returns original value of @v
  */
 static __always_inline ${int}
-${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	${int} c = ${atomic}_read(v);
+	${int} c = ${arch}${atomic}_read(v);
 
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
 
 	return c;
 }
--
cgit v1.2.3
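For illustration, after this change the template above generates the raw,
uninstrumented arch_ fallback, and the instrumented variant becomes a
separate wrapper generated at the generic level. A rough sketch for the
int-sized case (assuming ${arch} -> arch_, ${atomic} -> atomic,
${int} -> int; the instrument_atomic_write() hook from
<linux/instrumented.h> is shown as an example of the instrumentation, not
as a literal copy of the generated wrapper):

  /* Raw fallback generated from this template; no instrumentation. */
  static __always_inline int
  arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
  {
          int c = arch_atomic_read(v);

          do {
                  if (unlikely(c == u))
                          break;
          } while (!arch_atomic_try_cmpxchg(v, &c, c + a));

          return c;
  }

  /* Sketch of the instrumented variant, now generated at the generic level;
   * the real generated header may use a different instrumentation hook. */
  static __always_inline int
  atomic_fetch_add_unless(atomic_t *v, int a, int u)
  {
          instrument_atomic_write(v, sizeof(*v));
          return arch_atomic_fetch_add_unless(v, a, u);
  }

This split is what makes uninstrumented arch_atomic_*() calls available to
code such as text poke, while ordinary atomic_*() users keep the sanitizer
instrumentation.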