summary | refs | log | tree | commit | diff
path: root/kernel/rcu
diff options
context:
space:
mode:
author    Kees Cook <kees@kernel.org>  2026-02-20 23:49:23 -0800
committer Kees Cook <kees@kernel.org>  2026-02-21 01:02:28 -0800
commit    69050f8d6d075dc01af7a5f2f550a8067510366f (patch)
tree      bb265f94d9dfa7876c06a5d9f88673d496a15341 /kernel/rcu
parent    d39a1d7486d98668dd34aaa6732aad7977c45f5a (diff)
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replace kmalloc-family calls that allocate struct or union
object instances:

Single allocations:

	kmalloc(sizeof(TYPE), ...)

are replaced with:

	kmalloc_obj(TYPE, ...)

Array allocations:

	kmalloc_array(COUNT, sizeof(TYPE), ...)

are replaced with:

	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:

	kmalloc(struct_size(PTR, FAM, COUNT), ...)

are replaced with:

	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcuscale.c	| 25
-rw-r--r--	kernel/rcu/rcutorture.c	| 36
-rw-r--r--	kernel/rcu/refscale.c	|  5
-rw-r--r--	kernel/rcu/srcutree.c	|  5
-rw-r--r--	kernel/rcu/tasks.h	|  3
-rw-r--r--	kernel/rcu/update.c	|  2
6 files changed, 39 insertions(+), 37 deletions(-)
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index 1c50f89fbd6f..5512686be5d0 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -755,7 +755,8 @@ kfree_scale_thread(void *arg)
}
for (i = 0; i < kfree_alloc_num; i++) {
- alloc_ptr = kcalloc(kfree_mult, sizeof(struct kfree_obj), GFP_KERNEL);
+ alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult,
+ GFP_KERNEL);
if (!alloc_ptr)
return -ENOMEM;
@@ -908,8 +909,8 @@ kfree_scale_init(void)
kfree_mult * sizeof(struct kfree_obj),
kfree_by_call_rcu);
- kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
- GFP_KERNEL);
+ kfree_reader_tasks = kzalloc_objs(kfree_reader_tasks[0],
+ kfree_nrealthreads, GFP_KERNEL);
if (kfree_reader_tasks == NULL) {
firsterr = -ENOMEM;
goto unwind;
@@ -1129,8 +1130,7 @@ rcu_scale_init(void)
goto unwind;
schedule_timeout_uninterruptible(1);
}
- reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
- GFP_KERNEL);
+ reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders, GFP_KERNEL);
if (reader_tasks == NULL) {
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
@@ -1144,10 +1144,11 @@ rcu_scale_init(void)
}
while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
schedule_timeout_uninterruptible(1);
- writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL);
+ writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters, GFP_KERNEL);
writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
- writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL);
- writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL);
+ writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters,
+ GFP_KERNEL);
+ writer_done = kzalloc_objs(writer_done[0], nrealwriters, GFP_KERNEL);
if (gp_async) {
if (gp_async_max <= 0) {
pr_warn("%s: gp_async_max = %d must be greater than zero.\n",
@@ -1156,7 +1157,8 @@ rcu_scale_init(void)
firsterr = -EINVAL;
goto unwind;
}
- writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL);
+ writer_freelists = kzalloc_objs(writer_freelists[0],
+ nrealwriters, GFP_KERNEL);
}
if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
(gp_async && !writer_freelists)) {
@@ -1177,8 +1179,9 @@ rcu_scale_init(void)
init_llist_head(&wflp->ws_lhg);
init_llist_head(&wflp->ws_lhp);
- wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]),
- GFP_KERNEL);
+ wflp->ws_mblocks = kzalloc_objs(wflp->ws_mblocks[0],
+ gp_async_max,
+ GFP_KERNEL);
if (!wflp->ws_mblocks) {
firsterr = -ENOMEM;
goto unwind;
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 47ce7f49b52c..d2e673771295 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1626,7 +1626,7 @@ rcu_torture_writer(void *arg)
ulo_size = cur_ops->poll_active;
}
if (cur_ops->poll_active_full > 0) {
- rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL);
+ rgo = kzalloc_objs(*rgo, cur_ops->poll_active_full, GFP_KERNEL);
if (!WARN_ON(!rgo))
rgo_size = cur_ops->poll_active_full;
}
@@ -2462,7 +2462,7 @@ static void rcu_torture_timer(struct timer_list *unused)
/* Test call_rcu() invocation from interrupt handler. */
if (cur_ops->call) {
- struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
+ struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_NOWAIT);
if (rhp)
cur_ops->call(rhp, rcu_torture_timer_cb);
@@ -2558,7 +2558,7 @@ static int rcu_torture_updown_init(void)
VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives");
return 0;
}
- updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL);
+ updownreaders = kzalloc_objs(*updownreaders, n_up_down, GFP_KERNEL);
if (!updownreaders) {
VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests");
return -ENOMEM;
@@ -2891,7 +2891,7 @@ static void rcu_torture_mem_dump_obj(void)
mem_dump_obj(&z);
kmem_cache_free(kcp, rhp);
kmem_cache_destroy(kcp);
- rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+ rhp = kmalloc_obj(*rhp, GFP_KERNEL);
if (WARN_ON_ONCE(!rhp))
return;
pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
@@ -3399,7 +3399,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
n_launders++;
n_launders_sa++;
} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
- rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
+ rfcp = kmalloc_obj(*rfcp, GFP_KERNEL);
if (WARN_ON_ONCE(!rfcp)) {
schedule_timeout_interruptible(1);
continue;
@@ -3587,8 +3587,8 @@ static int __init rcu_torture_fwd_prog_init(void)
fwd_progress_holdoff = 1;
if (fwd_progress_div <= 0)
fwd_progress_div = 4;
- rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
- fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
+ rfp = kzalloc_objs(*rfp, fwd_progress, GFP_KERNEL);
+ fwd_prog_tasks = kzalloc_objs(*fwd_prog_tasks, fwd_progress, GFP_KERNEL);
if (!rfp || !fwd_prog_tasks) {
kfree(rfp);
kfree(fwd_prog_tasks);
@@ -3754,10 +3754,9 @@ static int rcu_torture_barrier_init(void)
atomic_set(&barrier_cbs_count, 0);
atomic_set(&barrier_cbs_invoked, 0);
barrier_cbs_tasks =
- kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
- GFP_KERNEL);
+ kzalloc_objs(barrier_cbs_tasks[0], n_barrier_cbs, GFP_KERNEL);
barrier_cbs_wq =
- kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
+ kzalloc_objs(barrier_cbs_wq[0], n_barrier_cbs, GFP_KERNEL);
if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
return -ENOMEM;
for (i = 0; i < n_barrier_cbs; i++) {
@@ -4224,7 +4223,7 @@ static void rcu_test_debug_objects(void)
(!cur_ops->call || !cur_ops->cb_barrier)))
return;
- struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+ struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_KERNEL);
init_rcu_head_on_stack(&rh1);
init_rcu_head_on_stack(&rh2);
@@ -4549,9 +4548,8 @@ rcu_torture_init(void)
rcu_torture_write_types();
if (nrealfakewriters > 0) {
- fakewriter_tasks = kcalloc(nrealfakewriters,
- sizeof(fakewriter_tasks[0]),
- GFP_KERNEL);
+ fakewriter_tasks = kzalloc_objs(fakewriter_tasks[0],
+ nrealfakewriters, GFP_KERNEL);
if (fakewriter_tasks == NULL) {
TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
@@ -4564,10 +4562,9 @@ rcu_torture_init(void)
if (torture_init_error(firsterr))
goto unwind;
}
- reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
- GFP_KERNEL);
- rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
- GFP_KERNEL);
+ reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders, GFP_KERNEL);
+ rcu_torture_reader_mbchk = kzalloc_objs(*rcu_torture_reader_mbchk,
+ nrealreaders, GFP_KERNEL);
if (!reader_tasks || !rcu_torture_reader_mbchk) {
TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
@@ -4595,7 +4592,8 @@ rcu_torture_init(void)
if (WARN_ON(nocbs_toggle < 0))
nocbs_toggle = HZ;
if (nrealnocbers > 0) {
- nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
+ nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers,
+ GFP_KERNEL);
if (nocb_tasks == NULL) {
TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 07a313782dfd..39d679a4c17e 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -1143,7 +1143,7 @@ static bool typesafe_init(void)
else if (si == 0)
si = nr_cpu_ids;
rtsarray_size = si;
- rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL);
+ rtsarray = kzalloc_objs(*rtsarray, si, GFP_KERNEL);
if (!rtsarray)
return false;
for (idx = 0; idx < rtsarray_size; idx++) {
@@ -1575,8 +1575,7 @@ ref_scale_init(void)
"%s: nreaders * loops will overflow, adjusted loops to %d",
__func__, INT_MAX / nreaders))
loops = INT_MAX / nreaders;
- reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
- GFP_KERNEL);
+ reader_tasks = kzalloc_objs(reader_tasks[0], nreaders, GFP_KERNEL);
if (!reader_tasks) {
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 66ba6a2f83d3..0faf35f393a3 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -173,7 +173,8 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
/* Initialize geometry if it has not already been initialized. */
rcu_init_geometry();
- ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
+ ssp->srcu_sup->node = kzalloc_objs(*ssp->srcu_sup->node, rcu_num_nodes,
+ gfp_flags);
if (!ssp->srcu_sup->node)
return false;
@@ -237,7 +238,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
if (!is_static)
- ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
+ ssp->srcu_sup = kzalloc_obj(*ssp->srcu_sup, GFP_KERNEL);
if (!ssp->srcu_sup)
return -ENOMEM;
if (!is_static)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 76f952196a29..d9ccf18eb035 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -259,7 +259,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
}
lim = rcu_task_enqueue_lim;
- rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
+ rtp->rtpcp_array = kzalloc_objs(struct rcu_tasks_percpu *,
+ num_possible_cpus(), GFP_KERNEL);
BUG_ON(!rtp->rtpcp_array);
for_each_possible_cpu(cpu) {
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index dfeba9b35395..14150f09fd61 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -614,7 +614,7 @@ static void early_boot_test_call_rcu(void)
call_rcu(&head, test_callback);
early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
call_srcu(&early_srcu, &shead, test_callback);
- rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+ rhp = kmalloc_obj(*rhp, GFP_KERNEL);
if (!WARN_ON_ONCE(!rhp))
kfree_rcu(rhp, rh);
}