author     Kees Cook <kees@kernel.org>    2025-12-03 15:30:34 -0800
committer  Kees Cook <kees@kernel.org>    2026-01-14 14:43:01 -0800
commit     e4c8b46b924eb8de66c6f0accc9cdd0c2e8fa23b
tree       8670c3d9c332b8e93ededb7f9ae93320b1b7edc6
parent     81cee9166a9073b4da28e970e75d7f89c98ed966
slab: Introduce kmalloc_flex() and family
As done for kmalloc_obj*(), introduce a type-aware allocator for
flexible arrays, which may also have "counted_by" annotations:

	ptr = kmalloc(struct_size(ptr, flex_member, count), gfp);

becomes:

	ptr = kmalloc_flex(*ptr, flex_member, count, gfp);

The internal use of __flex_counter() allows for automatically setting
the counter member of a struct's flexible array member when it has
been annotated with __counted_by(), avoiding any missed early size
initializations while __counted_by() annotations are added to the
kernel. Additionally, this also checks for "too large" allocations
based on the type size of the counter variable. For example:

	if (count > type_max(ptr->flex_counter))
		fail...;
	size = struct_size(ptr, flex_member, count);
	ptr = kmalloc(size, gfp);
	if (!ptr)
		fail...;
	ptr->flex_counter = count;

becomes (n.b. unchanged from earlier example):

	ptr = kmalloc_flex(*ptr, flex_member, count, gfp);
	if (!ptr)
		fail...;
	ptr->flex_counter = count;

Note that manual initialization of the flexible array counter is still
required (at some point) after allocation, as not all compiler versions
support the __counted_by annotation yet. But doing it internally makes
sure it cannot be missed when __counted_by _is_ available, meaning that
the bounds checker will not trip due to the lack of "early enough"
initializations that used to work before enabling the stricter bounds
checking. For example:

	ptr = kmalloc_flex(*ptr, flex_member, count, gfp);
	fill(ptr->flex, count);
	ptr->flex_count = count;

This works correctly before adding a __counted_by annotation (since
nothing is checking ptr->flex accesses against ptr->flex_count). After
adding the annotation, the bounds sanitizer would trip during fill()
because ptr->flex_count wasn't set yet. But with kmalloc_flex() setting
ptr->flex_count internally at allocation time, the existing code works
without needing to move the ptr->flex_count assignment before the call
to fill(). (This has been a stumbling block for __counted_by adoption.)

Link: https://patch.msgid.link/20251203233036.3212363-4-kees@kernel.org
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Kees Cook <kees@kernel.org>
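To make the pattern above concrete, here is a minimal usage sketch.
The struct pkt_buf type, its members, and pkt_buf_new() are hypothetical
names invented for illustration and are not part of this patch; only the
kmalloc_flex() semantics come from the change itself:

	struct pkt_buf {
		u32 len;
		u8 data[] __counted_by(len);
	};

	static struct pkt_buf *pkt_buf_new(size_t count, gfp_t gfp)
	{
		struct pkt_buf *pb;

		/* Allocates struct_size(pb, data, count) bytes; returns
		 * NULL if count cannot be represented by pb->len, and
		 * (with a __counted_by-aware compiler) records count for
		 * bounds checking before the caller touches pb->data.
		 */
		pb = kmalloc_flex(*pb, data, count, gfp);
		if (!pb)
			return NULL;

		/* Still assign the counter explicitly, since not all
		 * supported compilers provide __counted_by() yet.
		 */
		pb->len = count;
		return pb;
	}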
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab.h | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index cbb64a2698f5..7701b38cedec 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -983,6 +983,33 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
})
/**
+ * __alloc_flex - Allocate an object that has a trailing flexible array
+ * @KMALLOC: kmalloc wrapper function to use for allocation.
+ * @GFP: GFP flags for the allocation.
+ * @TYPE: type of structure to allocate space for.
+ * @FAM: The name of the flexible array member of @TYPE structure.
+ * @COUNT: how many @FAM elements to allocate space for.
+ *
+ * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
+ * @FAM elements, or NULL on failure or if @COUNT cannot be represented
+ * by the member of @TYPE that counts the @FAM elements (annotated via
+ * __counted_by()).
+ */
+#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT) \
+({ \
+	const size_t __count = (COUNT); \
+	const size_t __obj_size = struct_size_t(TYPE, FAM, __count); \
+	TYPE *__obj_ptr; \
+	if (WARN_ON_ONCE(overflows_flex_counter_type(TYPE, FAM, __count))) \
+		__obj_ptr = NULL; \
+	else \
+		__obj_ptr = KMALLOC(__obj_size, GFP); \
+	if (__obj_ptr) \
+		__set_flex_counter(__obj_ptr->FAM, __count); \
+	__obj_ptr; \
+})
+
+/**
* kmalloc_obj - Allocate a single instance of the given type
* @VAR_OR_TYPE: Variable or type to allocate.
* @GFP: GFP flags for the allocation.
@@ -1005,23 +1032,44 @@ void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
#define kmalloc_objs(VAR_OR_TYPE, COUNT, GFP) \
	__alloc_objs(kmalloc, GFP, typeof(VAR_OR_TYPE), COUNT)
+/**
+ * kmalloc_flex - Allocate a single instance of the given flexible structure
+ * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
+ * @FAM: The name of the flexible array member of the structure.
+ * @COUNT: How many flexible array member elements are desired.
+ * @GFP: GFP flags for the allocation.
+ *
+ * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
+ * failure. If @FAM has been annotated with __counted_by(), the allocation
+ * will immediately fail if @COUNT is larger than what the type of the
+ * struct's counter variable can represent.
+ */
+#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, GFP) \
+	__alloc_flex(kmalloc, GFP, typeof(VAR_OR_TYPE), FAM, COUNT)
+
/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
#define kzalloc_obj(P, GFP) \
	__alloc_objs(kzalloc, GFP, typeof(P), 1)
#define kzalloc_objs(P, COUNT, GFP) \
	__alloc_objs(kzalloc, GFP, typeof(P), COUNT)
+#define kzalloc_flex(P, FAM, COUNT, GFP) \
+	__alloc_flex(kzalloc, GFP, typeof(P), FAM, COUNT)
/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
#define kvmalloc_obj(P, GFP) \
	__alloc_objs(kvmalloc, GFP, typeof(P), 1)
#define kvmalloc_objs(P, COUNT, GFP) \
	__alloc_objs(kvmalloc, GFP, typeof(P), COUNT)
+#define kvmalloc_flex(P, FAM, COUNT, GFP) \
+	__alloc_flex(kvmalloc, GFP, typeof(P), FAM, COUNT)
/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
#define kvzalloc_obj(P, GFP) \
	__alloc_objs(kvzalloc, GFP, typeof(P), 1)
#define kvzalloc_objs(P, COUNT, GFP) \
	__alloc_objs(kvzalloc, GFP, typeof(P), COUNT)
+#define kvzalloc_flex(P, FAM, COUNT, GFP) \
+	__alloc_flex(kvzalloc, GFP, typeof(P), FAM, COUNT)
#define kmem_buckets_alloc(_b, _size, _flags) \
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
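
For completeness, a brief sketch of choosing among the zeroing and
vmalloc-fallback aliases added by this patch; struct log_ring, its
members, and the variables are hypothetical names for illustration only:

	struct log_ring {
		u32 nr;
		u64 entries[] __counted_by(nr);
	};

	struct log_ring *ring;
	u32 nr = 128;	/* arbitrary element count for the sketch */

	/* Small, zero-initialized allocation. */
	ring = kzalloc_flex(*ring, entries, nr, GFP_KERNEL);

	/* Potentially large allocation: kvzalloc_flex() zeroes the result
	 * and, like kvzalloc(), may fall back to vmalloc.
	 */
	ring = kvzalloc_flex(*ring, entries, nr, GFP_KERNEL);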