From 0e862838f290147ea9c16db852d8d494b552d38d Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 24 Jun 2022 14:13:07 +0200
Subject: bitops: unify non-atomic bitops prototypes across architectures

Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:

	ret	bool, int, unsigned long
	nr	int, long, unsigned int, unsigned long
	addr	volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but it can sometimes make
the compiler complain even though nothing is actually wrong.
Adjust all the prototypes to the following standard:

	ret	bool				retval can be only 0 or 1
	nr	unsigned long			native; signed makes no sense
	addr	volatile unsigned long *	bitmaps are arrays of ulongs

Next, some architectures don't define 'arch_' versions as they don't
support instrumentation, others do. To make sure there is always the
same set of callables present, and to ease any potential future
changes, make them all follow the rule:

 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;

and place the non-prefixed definitions into a new file in asm-generic,
to be included by non-instrumented architectures.

Finally, add some static assertions to prevent people from making a
mess here again. I also used the %__always_inline attribute
consistently, so that they always get resolved to the actual
operations.

Suggested-by: Andy Shevchenko
Signed-off-by: Alexander Lobakin
Acked-by: Mark Rutland
Reviewed-by: Yury Norov
Reviewed-by: Andy Shevchenko
Signed-off-by: Yury Norov
---
 tools/include/asm-generic/bitops/non-atomic.h | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

(limited to 'tools/include')

diff --git a/tools/include/asm-generic/bitops/non-atomic.h b/tools/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..e5e78e42e57b 100644
--- a/tools/include/asm-generic/bitops/non-atomic.h
+++ b/tools/include/asm-generic/bitops/non-atomic.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
-#include <asm/types.h>
+#include <linux/bits.h>
 
 /**
  * __set_bit - Set a bit in memory
@@ -13,7 +13,8 @@
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	*p |= mask;
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+__change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					volatile unsigned long *addr)
+static __always_inline bool
+__test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -101,7 +106,8 @@ static inline int __test_and_change_bit(int nr,
  * @nr: bit number to test
 * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
-- cgit v1.2.3
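Under this rule, an architecture that supports instrumentation exposes
only the 'arch_'-prefixed helpers with the unified signature, and the
non-prefixed names come from asm-generic. Below is a minimal sketch of
such an arch-side definition, a hypothetical illustration of the
convention rather than code taken from the patch:

	/* Hypothetical arch header: only the 'arch_' version lives here;
	 * the non-prefixed __test_and_set_bit() is supplied by asm-generic.
	 */
	static __always_inline bool
	arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
	{
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
		unsigned long old = *p;

		*p = old | mask;
		return (old & mask) != 0;	/* ret is bool: only 0 or 1 */
	}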
From e69eb9c460f128b71c6b995d75a05244e4b6cc3e Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Fri, 24 Jun 2022 14:13:09 +0200
Subject: bitops: wrap non-atomic bitops with a transparent macro

In preparation for altering the non-atomic bitops with a macro, wrap
them in a transparent definition. This requires prepending one more
'_' to their names in order to be able to do that seamlessly. It is a
simple change, given that all the non-prefixed definitions are now in
asm-generic. sparc32 already has several triple-underscored functions,
so I had to rename them ('___' -> 'sp32_').

Signed-off-by: Alexander Lobakin
Reviewed-by: Marco Elver
Reviewed-by: Andy Shevchenko
Signed-off-by: Yury Norov
---
 tools/include/asm-generic/bitops/non-atomic.h | 24 ++++++++++++------------
 tools/include/linux/bitops.h                  | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 12 deletions(-)

(limited to 'tools/include')

diff --git a/tools/include/asm-generic/bitops/non-atomic.h b/tools/include/asm-generic/bitops/non-atomic.h
index e5e78e42e57b..0c472a833408 100644
--- a/tools/include/asm-generic/bitops/non-atomic.h
+++ b/tools/include/asm-generic/bitops/non-atomic.h
@@ -5,7 +5,7 @@
 #include <linux/bits.h>
 
 /**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -14,7 +14,7 @@
  * may be that only one operation succeeds.
  */
 static __always_inline void
-__set_bit(unsigned long nr, volatile unsigned long *addr)
+___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -23,7 +23,7 @@ __set_bit(unsigned long nr, volatile unsigned long *addr)
 	*p |= mask;
 }
 
 static __always_inline void
-__clear_bit(unsigned long nr, volatile unsigned long *addr)
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -32,7 +32,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -41,7 +41,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *addr)
  * may be that only one operation succeeds.
  */
 static __always_inline void
-__change_bit(unsigned long nr, volatile unsigned long *addr)
+___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -50,7 +50,7 @@ __change_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -59,7 +59,7 @@ __change_bit(unsigned long nr, volatile unsigned long *addr)
  * but actually fail. You must protect multiple accesses with a lock.
  */
 static __always_inline bool
-__test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -70,7 +70,7 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -79,7 +79,7 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
  * but actually fail. You must protect multiple accesses with a lock.
  */
 static __always_inline bool
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -91,7 +91,7 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 /* WARNING: non atomic and it can be reordered! */
 static __always_inline bool
-__test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -102,12 +102,12 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 }
 
 /**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
 static __always_inline bool
-test_bit(unsigned long nr, const volatile unsigned long *addr)
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 5fca38fe1ba8..f18683b95ea6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -25,6 +25,22 @@ extern unsigned int __sw_hweight16(unsigned int w);
 extern unsigned int __sw_hweight32(unsigned int w);
 extern unsigned long __sw_hweight64(__u64 w);
 
+/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#define bitop(op, nr, addr) \
+	op(nr, addr)
+
+#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
-- cgit v1.2.3
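The wrapper is transparent in the sense that bitop(op, nr, addr)
expands straight to op(nr, addr), so existing callers compile to
exactly the same code as before; the extra underscore merely moves the
real implementation one level down. A small usage sketch (hypothetical
caller, assuming a 64-bit bitmap):

	DECLARE_BITMAP(map, 64);	/* one unsigned long on 64-bit */

	__set_bit(5, map);		/* expands to ___set_bit(5, map) */
	if (test_bit(5, map))		/* expands to _test_bit(5, map) */
		__clear_bit(5, map);	/* expands to ___clear_bit(5, map) */

Routing every call through the single bitop() macro means a later
change can redefine just that macro, for instance to special-case
compile-time-constant arguments, without touching any call site.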
From e2863a78593d638d3924a6f67900c4820034f349 Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Fri, 1 Jul 2022 05:54:24 -0700
Subject: lib/bitmap: change return types to bool where appropriate

Some bitmap functions return boolean results in int variables. Fix
this by changing the return types to bool.

Signed-off-by: Yury Norov
---
 tools/include/linux/bitmap.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'tools/include')

diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index afdf93bebaaf..2ae7ab8ed7d1 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -14,7 +14,7 @@ int __bitmap_weight(const unsigned long *bitmap, int bits);
 void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, int bits);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, unsigned int bits);
 bool __bitmap_equal(const unsigned long *bitmap1,
 		    const unsigned long *bitmap2, unsigned int bits);
@@ -45,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
 	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
 }
 
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -53,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
 	return find_first_bit(src, nbits) == nbits;
 }
 
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -146,7 +146,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
  * @src2: operand 2
  * @nbits: size of bitmap
  */
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
 			     const unsigned long *src2, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
-- cgit v1.2.3
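The bool returns match how these predicates are consumed:
bitmap_and(), for example, reports whether the resulting intersection
is non-empty. A brief usage sketch (hypothetical caller):

	DECLARE_BITMAP(a, 64);
	DECLARE_BITMAP(b, 64);
	DECLARE_BITMAP(dst, 64);

	/* The result can only ever be 0 or 1, so bool documents the
	 * contract better than int did.
	 */
	bool nonempty = bitmap_and(dst, a, b, 64);

	if (!nonempty)
		/* a and b share no set bits */;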
From 4dea97f8636d0514befc9fc5cf342b351b7d0e20 Mon Sep 17 00:00:00 2001
From: Yury Norov
Date: Fri, 1 Jul 2022 05:54:25 -0700
Subject: lib/bitmap: change type of bitmap_weight to unsigned long

bitmap_weight() doesn't return negative values, so change its return
type to unsigned long. This may help the compiler generate better
code and catch bugs.

Signed-off-by: Yury Norov
---
 tools/include/linux/bitmap.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tools/include')

diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 2ae7ab8ed7d1..ae1852e39142 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -11,7 +11,7 @@
 #define DECLARE_BITMAP(name,bits) \
 	unsigned long name[BITS_TO_LONGS(bits)]
 
-int __bitmap_weight(const unsigned long *bitmap, int bits);
+unsigned long __bitmap_weight(const unsigned long *bitmap, unsigned int bits);
 void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, int bits);
 bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
@@ -61,7 +61,7 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
 	return find_first_zero_bit(src, nbits) == nbits;
 }
 
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static inline unsigned long bitmap_weight(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
-- cgit v1.2.3
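A short usage sketch of the retyped weight helper (hypothetical
caller, assuming a 256-bit bitmap):

	DECLARE_BITMAP(pending, 256);
	unsigned long weight;

	/* The weight is a count of set bits and can never be negative,
	 * so unsigned long is the natural type for it.
	 */
	weight = bitmap_weight(pending, 256);
	if (weight > 256 / 2)
		/* more than half of the bits are set */;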