| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-06 09:01:27 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-06 09:01:27 -0800 |
| commit | f468cf53c5240bf5063d0c6fe620b5ae2de37801 (patch) | |
| tree | 86989275900a3848345c8dbd0af359c0171fcca1 /include/linux | |
| parent | 309e49039f124a9dcb99c05651af8eb8fa05bc29 (diff) | |
| parent | 5ba71195a9cb8bb573c7165685a63654af4d7401 (diff) | |
Merge tag 'bitmap-for-6.19' of github.com:/norov/linux
Pull bitmap updates from Yury Norov:
- Runtime field_{get,prep}() (Geert)
- Rust ID pool updates (Alice)
- min_t() simplification (David)
- __sw_hweightN kernel-doc fixes (Andy)
- cpumask.h headers cleanup (Andy)
* tag 'bitmap-for-6.19' of github.com:/norov/linux: (32 commits)
rust_binder: use bitmap for allocation of handles
rust: id_pool: do not immediately acquire new ids
rust: id_pool: do not supply starting capacity
rust: id_pool: rename IdPool::new() to with_capacity()
rust: bitmap: add BitmapVec::new_inline()
rust: bitmap: add MAX_LEN and MAX_INLINE_LEN constants
cpumask: Don't use "proxy" headers
soc: renesas: Use bitfield helpers
clk: renesas: Use bitfield helpers
ALSA: usb-audio: Convert to common field_{get,prep}() helpers
soc: renesas: rz-sysc: Convert to common field_get() helper
pinctrl: ma35: Convert to common field_{get,prep}() helpers
iio: mlx90614: Convert to common field_{get,prep}() helpers
iio: dac: Convert to common field_prep() helper
gpio: aspeed: Convert to common field_{get,prep}() helpers
EDAC/ie31200: Convert to common field_get() helper
crypto: qat - convert to common field_get() helper
clk: at91: Convert to common field_{get,prep}() helpers
bitfield: Add non-constant field_{prep,get}() helpers
bitfield: Add less-checking __FIELD_{GET,PREP}()
...
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/bitfield.h | 95 |
| -rw-r--r-- | include/linux/cpumask.h | 10 |
| -rw-r--r-- | include/linux/nodemask.h | 9 |
3 files changed, 97 insertions, 17 deletions
```diff
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 5355f8f806a9..126dc5b380af 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -17,6 +17,7 @@
  * FIELD_{GET,PREP} macros take as first parameter shifted mask
  * from which they extract the base mask and shift amount.
  * Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
  *
  * Example:
  *
@@ -60,7 +61,7 @@
 
 #define __bf_cast_unsigned(type, x)	((__unsigned_scalar_typeof(type))(x))
 
-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \
 	({ \
 		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
 				 _pfx "mask is not constant"); \
@@ -69,13 +70,33 @@
 				 ~((_mask) >> __bf_shf(_mask)) & \
 					(0 + (_val)) : 0, \
 				 _pfx "value too large for the field"); \
-		BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
-				 __bf_cast_unsigned(_reg, ~0ull), \
-				 _pfx "type of reg too small for mask"); \
 		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
 					      (1ULL << __bf_shf(_mask))); \
 	})
 
+#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \
+	BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \
+			 __bf_cast_unsigned(reg, ~0ull), \
+			 pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx) \
+	({ \
+		__BF_FIELD_CHECK_MASK(mask, val, pfx); \
+		__BF_FIELD_CHECK_REG(mask, reg, pfx); \
+	})
+
+#define __FIELD_PREP(mask, val, pfx) \
+	({ \
+		__BF_FIELD_CHECK_MASK(mask, val, pfx); \
+		((typeof(mask))(val) << __bf_shf(mask)) & (mask); \
+	})
+
+#define __FIELD_GET(mask, reg, pfx) \
+	({ \
+		__BF_FIELD_CHECK_MASK(mask, 0U, pfx); \
+		(typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \
+	})
+
 /**
  * FIELD_MAX() - produce the maximum value representable by a field
  * @_mask: shifted mask defining the field's length and position
@@ -112,8 +133,8 @@
  */
 #define FIELD_PREP(_mask, _val) \
 	({ \
-		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
-		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+		__BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \
+		__FIELD_PREP(_mask, _val, "FIELD_PREP: "); \
 	})
 
 #define __BF_CHECK_POW2(n)	BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
@@ -152,8 +173,8 @@
  */
 #define FIELD_GET(_mask, _reg) \
 	({ \
-		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
-		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+		__BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \
+		__FIELD_GET(_mask, _reg, "FIELD_GET: "); \
 	})
 
 /**
@@ -220,4 +241,62 @@ __MAKE_OP(64)
 #undef __MAKE_OP
 #undef ____MAKE_OP
 
+#define __field_prep(mask, val) \
+	({ \
+		__auto_type __mask = (mask); \
+		typeof(__mask) __val = (val); \
+		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+				__ffs(__mask) : __ffs64(__mask); \
+		(__val << __shift) & __mask; \
+	})
+
+#define __field_get(mask, reg) \
+	({ \
+		__auto_type __mask = (mask); \
+		typeof(__mask) __reg = (reg); \
+		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+				__ffs(__mask) : __ffs64(__mask); \
+		(__reg & __mask) >> __shift; \
+	})
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ *	  non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val) \
+	(__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+				    : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ *	  non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg) \
+	(__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+				    : __field_get(mask, reg))
+
 #endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index afedfd5bea07..80211900f373 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -7,14 +7,16 @@
  * set of CPUs in a system, one bit position per CPU number. In general,
  * only nr_cpu_ids (<= NR_CPUS) bits are valid.
  */
-#include <linux/cleanup.h>
-#include <linux/kernel.h>
+#include <linux/atomic.h>
 #include <linux/bitmap.h>
+#include <linux/cleanup.h>
 #include <linux/cpumask_types.h>
-#include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/gfp_types.h>
 #include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
 
 /**
  * cpumask_pr_args - printf args to output a cpumask
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 7ad1f5c7407e..bd38648c998d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,18 +245,18 @@ static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int n
 }
 
 /* FIXME: better would be to fix all architectures to never return
-          > MAX_NUMNODES, then the silly min_ts could be dropped. */
+          > MAX_NUMNODES, then the silly min()s could be dropped. */
 
 #define first_node(src) __first_node(&(src))
 static __always_inline unsigned int __first_node(const nodemask_t *srcp)
 {
-	return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+	return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
 }
 
 #define next_node(n, src) __next_node((n), &(src))
 static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
 {
-	return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+	return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }
 
 /*
@@ -293,8 +293,7 @@ static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
 #define first_unset_node(mask) __first_unset_node(&(mask))
 static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
 {
-	return min_t(unsigned int, MAX_NUMNODES,
-			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+	return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES));
 }
 
 #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
```
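The kernel-doc added above spells out the intended split: FIELD_PREP()/FIELD_GET() keep the full compile-time checking for constant masks, while the new field_prep()/field_get() also accept a mask that is only known at run time (for example one read from a table). The sketch below is a hypothetical illustration of that split, not part of the series; the register layout (CTRL_MODE_MASK) and the pack_ctrl()/unpack_chan() helpers are invented for this example, and only the bitfield macros come from <linux/bitfield.h>.

```c
/*
 * Hypothetical usage sketch for the constant-mask and runtime-mask
 * helpers. CTRL_MODE_MASK, pack_ctrl() and unpack_chan() are made up
 * for illustration.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define CTRL_MODE_MASK	GENMASK(7, 4)	/* compile-time constant mask */

static u32 pack_ctrl(u32 mode, u32 chan_mask, u32 chan_val)
{
	u32 reg = 0;

	/* Constant mask: FIELD_PREP() keeps the full BUILD_BUG_ON() checks. */
	reg |= FIELD_PREP(CTRL_MODE_MASK, mode);

	/*
	 * Runtime mask, e.g. looked up from a table: field_prep() computes
	 * the shift with __ffs()/__ffs64() instead of requiring a constant.
	 */
	reg |= field_prep(chan_mask, chan_val);

	return reg;
}

static u32 unpack_chan(u32 reg, u32 chan_mask)
{
	/* Non-constant counterpart of FIELD_GET(chan_mask, reg). */
	return field_get(chan_mask, reg);
}
```

Note that because field_prep()/field_get() fall back to __FIELD_PREP()/__FIELD_GET() when __builtin_constant_p() sees a constant mask, the constant-mask call in pack_ctrl() could equally be written with field_prep() at no extra run-time cost; FIELD_PREP()/FIELD_GET() remain the way to insist on a compile-time-constant mask.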
