diff options
| author | Kees Cook <kees@kernel.org> | 2026-02-20 23:49:23 -0800 |
|---|---|---|
| committer | Kees Cook <kees@kernel.org> | 2026-02-21 01:02:28 -0800 |
| commit | 69050f8d6d075dc01af7a5f2f550a8067510366f (patch) | |
| tree | bb265f94d9dfa7876c06a5d9f88673d496a15341 /kernel | |
| parent | d39a1d7486d98668dd34aaa6732aad7977c45f5a (diff) | |
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replace kmalloc-family calls that allocate struct or union
object instances:
Single allocations: kmalloc(sizeof(TYPE), ...)
are replaced with: kmalloc_obj(TYPE, ...)
Array allocations: kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with: kmalloc_objs(TYPE, COUNT, ...)
Flex array allocations: kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with: kmalloc_flex(*PTR, FAM, COUNT, ...)
(In all of the above patterns, TYPE may also be written as *VAR, i.e. the dereferenced destination pointer.)
The resulting allocations no longer return "void *"; they return "TYPE *" instead, giving the compiler type information about the allocation.
Signed-off-by: Kees Cook <kees@kernel.org>
Diffstat (limited to 'kernel')
165 files changed, 543 insertions, 551 deletions
diff --git a/kernel/acct.c b/kernel/acct.c index 812808e5b1b8..06e8b79eaf7e 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -255,7 +255,7 @@ static int acct_on(const char __user *name) if (!(file->f_mode & FMODE_CAN_WRITE)) return -EIO; - acct = kzalloc(sizeof(struct bsd_acct_struct), GFP_KERNEL); + acct = kzalloc_obj(struct bsd_acct_struct, GFP_KERNEL); if (!acct) return -ENOMEM; diff --git a/kernel/async.c b/kernel/async.c index 4c3e6a44595f..862532ad328a 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -205,7 +205,7 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data, async_cookie_t newcookie; /* allow irq-off callers */ - entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC); + entry = kzalloc_obj(struct async_entry, GFP_ATOMIC); /* * If we're out of memory or if there's too much work @@ -261,7 +261,7 @@ bool async_schedule_dev_nocall(async_func_t func, struct device *dev) { struct async_entry *entry; - entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL); + entry = kzalloc_obj(struct async_entry, GFP_KERNEL); /* Give up if there is no memory or too much work. 
*/ if (!entry || atomic_read(&entry_count) > MAX_WORK) { diff --git a/kernel/audit.c b/kernel/audit.c index 592d927e70f9..838ca1648f7b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -545,7 +545,7 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net, if (!pid || !net) return -EINVAL; - ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL); + ac_new = kzalloc_obj(*ac_new, GFP_KERNEL); if (!ac_new) return -ENOMEM; ac_new->pid = get_pid(pid); @@ -1044,7 +1044,7 @@ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int struct task_struct *tsk; struct audit_reply *reply; - reply = kzalloc(sizeof(*reply), GFP_KERNEL); + reply = kzalloc_obj(*reply, GFP_KERNEL); if (!reply) return; @@ -1517,8 +1517,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) return err; } - sig_data = kmalloc(struct_size(sig_data, ctx, lsmctx.len), - GFP_KERNEL); + sig_data = kmalloc_flex(*sig_data, ctx, lsmctx.len, GFP_KERNEL); if (!sig_data) { if (lsmprop_is_set(&audit_sig_lsm)) security_release_secctx(&lsmctx); diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index b92805b317a2..7b89e1ccb5a4 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -89,7 +89,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa goto out; } - audit_mark = kzalloc(sizeof(*audit_mark), GFP_KERNEL); + audit_mark = kzalloc_obj(*audit_mark, GFP_KERNEL); if (unlikely(!audit_mark)) { audit_mark = ERR_PTR(-ENOMEM); goto out; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index fda6beb041e0..3ffd6582bfe5 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -96,7 +96,7 @@ static struct audit_tree *alloc_tree(const char *s) size_t sz; sz = strlen(s) + 1; - tree = kmalloc(struct_size(tree, pathname, sz), GFP_KERNEL); + tree = kmalloc_flex(*tree, pathname, sz, GFP_KERNEL); if (tree) { refcount_set(&tree->count, 1); tree->goner = 0; @@ -192,7 +192,7 @@ static struct audit_chunk 
*alloc_chunk(int count) struct audit_chunk *chunk; int i; - chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL); + chunk = kzalloc_flex(*chunk, owners, count, GFP_KERNEL); if (!chunk) return NULL; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index a700e3c8925f..6a73b30929c0 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -139,7 +139,7 @@ static struct audit_parent *audit_init_parent(const struct path *path) struct audit_parent *parent; int ret; - parent = kzalloc(sizeof(*parent), GFP_KERNEL); + parent = kzalloc_obj(*parent, GFP_KERNEL); if (unlikely(!parent)) return ERR_PTR(-ENOMEM); @@ -161,7 +161,7 @@ static struct audit_watch *audit_init_watch(char *path) { struct audit_watch *watch; - watch = kzalloc(sizeof(*watch), GFP_KERNEL); + watch = kzalloc_obj(*watch, GFP_KERNEL); if (unlikely(!watch)) return ERR_PTR(-ENOMEM); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 6a86c0683b67..e2d6f9a91a49 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -108,11 +108,11 @@ static inline struct audit_entry *audit_init_entry(u32 field_count) struct audit_entry *entry; struct audit_field *fields; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); + entry = kzalloc_obj(*entry, GFP_KERNEL); if (unlikely(!entry)) return NULL; - fields = kcalloc(field_count, sizeof(*fields), GFP_KERNEL); + fields = kzalloc_objs(*fields, field_count, GFP_KERNEL); if (unlikely(!fields)) { kfree(entry); return NULL; @@ -638,7 +638,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) void *bufp; int i; - data = kzalloc(struct_size(data, buf, krule->buflen), GFP_KERNEL); + data = kzalloc_flex(*data, buf, krule->buflen, GFP_KERNEL); if (unlikely(!data)) return NULL; @@ -1180,7 +1180,7 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - dest = kmalloc(sizeof(*dest), GFP_KERNEL); + dest = 
kmalloc_obj(*dest, GFP_KERNEL); if (!dest) return -ENOMEM; dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 86a44b162a87..e45883de200f 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -255,7 +255,7 @@ static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; - ctx->trees = kzalloc(sizeof(struct audit_tree_refs), GFP_KERNEL); + ctx->trees = kzalloc_obj(struct audit_tree_refs, GFP_KERNEL); if (!ctx->trees) { ctx->trees = p; return 0; @@ -1032,7 +1032,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state) { struct audit_context *context; - context = kzalloc(sizeof(*context), GFP_KERNEL); + context = kzalloc_obj(*context, GFP_KERNEL); if (!context) return NULL; context->context = AUDIT_CTX_UNUSED; @@ -2153,7 +2153,7 @@ static struct audit_names *audit_alloc_name(struct audit_context *context, aname = &context->preallocated_names[context->name_count]; memset(aname, 0, sizeof(*aname)); } else { - aname = kzalloc(sizeof(*aname), GFP_NOFS); + aname = kzalloc_obj(*aname, GFP_NOFS); if (!aname) return NULL; aname->should_free = true; @@ -2650,7 +2650,7 @@ int __audit_sockaddr(int len, void *a) struct audit_context *context = audit_context(); if (!context->sockaddr) { - void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL); + void *p = kmalloc_obj(struct sockaddr_storage, GFP_KERNEL); if (!p) return -ENOMEM; @@ -2704,7 +2704,7 @@ int audit_signal_info_syscall(struct task_struct *t) axp = (void *)ctx->aux_pids; if (!axp || axp->pid_count == AUDIT_AUX_PIDS) { - axp = kzalloc(sizeof(*axp), GFP_ATOMIC); + axp = kzalloc_obj(*axp, GFP_ATOMIC); if (!axp) return -ENOMEM; @@ -2743,7 +2743,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, struct audit_context *context = audit_context(); struct cpu_vfs_cap_data vcaps; - ax = kmalloc(sizeof(*ax), GFP_KERNEL); + ax = kmalloc_obj(*ax, GFP_KERNEL); if (!ax) return -ENOMEM; diff --git 
a/kernel/bpf/arena.c b/kernel/bpf/arena.c index 42fae0a9f314..5baea15cb07d 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -324,7 +324,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) { struct vma_list *vml; - vml = kmalloc(sizeof(*vml), GFP_KERNEL); + vml = kmalloc_obj(*vml, GFP_KERNEL); if (!vml) return -ENOMEM; refcount_set(&vml->mmap_count, 1); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 67e9e811de3a..188b0e35f856 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -1061,7 +1061,7 @@ static int prog_array_map_poke_track(struct bpf_map *map, goto out; } - elem = kmalloc(sizeof(*elem), GFP_KERNEL); + elem = kmalloc_obj(*elem, GFP_KERNEL); if (!elem) { ret = -ENOMEM; goto out; @@ -1174,7 +1174,7 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) struct bpf_array_aux *aux; struct bpf_map *map; - aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT); + aux = kzalloc_obj(*aux, GFP_KERNEL_ACCOUNT); if (!aux) return ERR_PTR(-ENOMEM); @@ -1237,7 +1237,7 @@ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, { struct bpf_event_entry *ee; - ee = kzalloc(sizeof(*ee), GFP_KERNEL); + ee = kzalloc_obj(*ee, GFP_KERNEL); if (ee) { ee->event = perf_file->private_data; ee->perf_file = perf_file; diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 4b58d56ecab1..b5d16050f7b3 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -295,7 +295,7 @@ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info) { struct bpf_iter_target_info *tinfo; - tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL); + tinfo = kzalloc_obj(*tinfo, GFP_KERNEL); if (!tinfo) return -ENOMEM; @@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, if (prog->sleepable && !bpf_iter_target_support_resched(tinfo)) return -EINVAL; - link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); + link = kzalloc_obj(*link, GFP_USER | __GFP_NOWARN); if (!link) 
return -ENOMEM; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index c43346cb3d76..1ff292a6f3ed 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -218,7 +218,7 @@ static int prepare_arg_info(struct btf *btf, args = btf_params(func_proto); stub_args = btf_params(stub_func_proto); - info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL); + info_buf = kzalloc_objs(*info_buf, nargs, GFP_KERNEL); if (!info_buf) return -ENOMEM; @@ -378,8 +378,7 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (!is_valid_value_type(btf, value_id, t, value_name)) return -EINVAL; - arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info), - GFP_KERNEL); + arg_info = kzalloc_objs(*arg_info, btf_type_vlen(t), GFP_KERNEL); if (!arg_info) return -ENOMEM; @@ -721,7 +720,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (uvalue->common.state || refcount_read(&uvalue->common.refcnt)) return -EINVAL; - tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); + tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX, GFP_KERNEL); if (!tlinks) return -ENOMEM; @@ -815,7 +814,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, /* Poison pointer on error instead of return for backward compatibility */ bpf_prog_assoc_struct_ops(prog, &st_map->map); - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); if (!link) { bpf_prog_put(prog); err = -ENOMEM; @@ -825,7 +824,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, &bpf_struct_ops_link_lops, prog, prog->expected_attach_type); *plink++ = &link->link; - ksym = kzalloc(sizeof(*ksym), GFP_USER); + ksym = kzalloc_obj(*ksym, GFP_USER); if (!ksym) { err = -ENOMEM; goto reset_unlock; @@ -1376,7 +1375,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) goto err_out; } - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); if (!link) { err = 
-ENOMEM; goto err_out; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7708958e3fb8..ee9037aa9ab7 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1729,8 +1729,8 @@ static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t) new_size = min_t(u32, BTF_MAX_TYPE, btf->types_size + expand_by); - new_types = kvcalloc(new_size, sizeof(*new_types), - GFP_KERNEL | __GFP_NOWARN); + new_types = kvzalloc_objs(*new_types, new_size, + GFP_KERNEL | __GFP_NOWARN); if (!new_types) return -ENOMEM; @@ -4072,7 +4072,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type /* This needs to be kzalloc to zero out padding and unused fields, see * comment in btf_record_equal. */ - rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); + rec = kzalloc_flex(*rec, fields, cnt, GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!rec) return ERR_PTR(-ENOMEM); @@ -5687,7 +5687,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0); BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32)); - aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN); + aof = kmalloc_obj(*aof, GFP_KERNEL | __GFP_NOWARN); if (!aof) return ERR_PTR(-ENOMEM); aof->cnt = 0; @@ -5885,7 +5885,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uat if (attr->btf_size > BTF_MAX_SIZE) return ERR_PTR(-E2BIG); - env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + env = kzalloc_obj(*env, GFP_KERNEL | __GFP_NOWARN); if (!env) return ERR_PTR(-ENOMEM); @@ -5897,7 +5897,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uat if (err) goto errout_free; - btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); + btf = kzalloc_obj(*btf, GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; goto errout; @@ -6314,7 +6314,7 @@ static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name if 
(!IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) return ERR_PTR(-ENOENT); - btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); + btf = kzalloc_obj(*btf, GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; goto errout; @@ -6365,7 +6365,7 @@ struct btf *btf_parse_vmlinux(void) struct btf *btf; int err; - env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + env = kzalloc_obj(*env, GFP_KERNEL | __GFP_NOWARN); if (!env) return ERR_PTR(-ENOMEM); @@ -6415,7 +6415,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, if (!vmlinux_btf) return ERR_PTR(-EINVAL); - env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + env = kzalloc_obj(*env, GFP_KERNEL | __GFP_NOWARN); if (!env) return ERR_PTR(-ENOMEM); @@ -6432,7 +6432,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, base_btf = vmlinux_btf; } - btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); + btf = kzalloc_obj(*btf, GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; goto errout; @@ -8306,7 +8306,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, switch (op) { case MODULE_STATE_COMING: - btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); + btf_mod = kzalloc_obj(*btf_mod, GFP_KERNEL); if (!btf_mod) { err = -ENOMEM; goto out; @@ -8341,7 +8341,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, if (IS_ENABLED(CONFIG_SYSFS)) { struct bin_attribute *attr; - attr = kzalloc(sizeof(*attr), GFP_KERNEL); + attr = kzalloc_obj(*attr, GFP_KERNEL); if (!attr) goto out; @@ -8689,7 +8689,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, } if (!tab) { - tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); + tab = kzalloc_obj(*tab, GFP_KERNEL | __GFP_NOWARN); if (!tab) return -ENOMEM; btf->kfunc_set_tab = tab; @@ -9439,7 +9439,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" 
* into arrays of btf_ids of struct fields and array indices. */ - specs = kcalloc(3, sizeof(*specs), GFP_KERNEL_ACCOUNT); + specs = kzalloc_objs(*specs, 3, GFP_KERNEL_ACCOUNT); if (!specs) return -ENOMEM; @@ -9464,7 +9464,8 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, goto out; } if (cc->cnt) { - cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL_ACCOUNT); + cands.cands = kzalloc_objs(*cands.cands, cc->cnt, + GFP_KERNEL_ACCOUNT); if (!cands.cands) { err = -ENOMEM; goto out; @@ -9616,7 +9617,7 @@ btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops, tab = btf->struct_ops_tab; if (!tab) { - tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL); + tab = kzalloc_flex(*tab, ops, 4, GFP_KERNEL); if (!tab) return -ENOMEM; tab->capacity = 4; @@ -9705,7 +9706,7 @@ int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) if (IS_ERR(btf)) return PTR_ERR(btf); - log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN); + log = kzalloc_obj(*log, GFP_KERNEL | __GFP_NOWARN); if (!log) { err = -ENOMEM; goto errout; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index b029f0369ecf..5d7a35e476e9 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -845,7 +845,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp, if (pl) { old_prog = pl->prog; } else { - pl = kmalloc(sizeof(*pl), GFP_KERNEL); + pl = kmalloc_obj(*pl, GFP_KERNEL); if (!pl) { bpf_cgroup_storages_free(new_storage); return -ENOMEM; @@ -1488,7 +1488,7 @@ int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (IS_ERR(cgrp)) return PTR_ERR(cgrp); - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); if (!link) { err = -ENOMEM; goto out_put_cgroup; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 5ab6bace7d0d..80b3e94f3fe3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -108,7 +108,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag if (fp 
== NULL) return NULL; - aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); + aux = kzalloc_obj(*aux, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); if (aux == NULL) { vfree(fp); return NULL; @@ -180,9 +180,9 @@ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) if (!prog->aux->nr_linfo || !prog->jit_requested) return 0; - prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo, - sizeof(*prog->aux->jited_linfo), - bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN)); + prog->aux->jited_linfo = kvzalloc_objs(*prog->aux->jited_linfo, + prog->aux->nr_linfo, + bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN)); if (!prog->aux->jited_linfo) return -ENOMEM; @@ -910,8 +910,8 @@ static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_ins struct bpf_prog_pack *pack; int err; - pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)), - GFP_KERNEL); + pack = kzalloc_flex(*pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT), + GFP_KERNEL); if (!pack) return NULL; pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE); @@ -2597,7 +2597,7 @@ struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) struct bpf_prog_array *p; if (prog_cnt) - p = kzalloc(struct_size(p, items, prog_cnt + 1), flags); + p = kzalloc_flex(*p, items, prog_cnt + 1, flags); else p = &bpf_empty_prog_array.hdr; diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c index 7e75a1936256..2b0660c32c92 100644 --- a/kernel/bpf/crypto.c +++ b/kernel/bpf/crypto.c @@ -68,7 +68,7 @@ int bpf_crypto_register_type(const struct bpf_crypto_type *type) goto unlock; } - node = kmalloc(sizeof(*node), GFP_KERNEL); + node = kmalloc_obj(*node, GFP_KERNEL); err = -ENOMEM; if (!node) goto unlock; @@ -176,7 +176,7 @@ bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz, goto err_module_put; } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) { *err = -ENOMEM; goto err_module_put; diff --git 
a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 7ac32798eb04..42a692682f18 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3993,7 +3993,7 @@ __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags) if (IS_ERR(key_ref)) return NULL; - bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); + bkey = kmalloc_obj(*bkey, GFP_KERNEL); if (!bkey) { key_put(key_ref_to_ptr(key_ref)); return NULL; @@ -4033,7 +4033,7 @@ __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id) if (system_keyring_id_check(id) < 0) return NULL; - bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); + bkey = kmalloc_obj(*bkey, GFP_ATOMIC); if (!bkey) return NULL; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 005ea3a2cda7..a111b0e9214e 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -195,7 +195,7 @@ static struct map_iter *map_iter_alloc(struct bpf_map *map) { struct map_iter *iter; - iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN); + iter = kzalloc_obj(*iter, GFP_KERNEL | __GFP_NOWARN); if (!iter) goto error; @@ -1044,7 +1044,7 @@ static int bpf_init_fs_context(struct fs_context *fc) { struct bpf_mount_opts *opts; - opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL); + opts = kzalloc_obj(struct bpf_mount_opts, GFP_KERNEL); if (!opts) return -ENOMEM; diff --git a/kernel/bpf/liveness.c b/kernel/bpf/liveness.c index 60db5d655495..998986853c61 100644 --- a/kernel/bpf/liveness.c +++ b/kernel/bpf/liveness.c @@ -193,8 +193,8 @@ static struct func_instance *__lookup_instance(struct bpf_verifier_env *env, result = kvzalloc(size, GFP_KERNEL_ACCOUNT); if (!result) return ERR_PTR(-ENOMEM); - result->must_write_set = kvcalloc(subprog_sz, sizeof(*result->must_write_set), - GFP_KERNEL_ACCOUNT); + result->must_write_set = kvzalloc_objs(*result->must_write_set, + subprog_sz, GFP_KERNEL_ACCOUNT); if (!result->must_write_set) { kvfree(result); return ERR_PTR(-ENOMEM); @@ -217,7 +217,7 @@ static struct func_instance *lookup_instance(struct bpf_verifier_env *env, 
int bpf_stack_liveness_init(struct bpf_verifier_env *env) { - env->liveness = kvzalloc(sizeof(*env->liveness), GFP_KERNEL_ACCOUNT); + env->liveness = kvzalloc_obj(*env->liveness, GFP_KERNEL_ACCOUNT); if (!env->liveness) return -ENOMEM; hash_init(env->liveness->func_instances); @@ -266,7 +266,8 @@ static struct per_frame_masks *alloc_frame_masks(struct bpf_verifier_env *env, struct per_frame_masks *arr; if (!instance->frames[frame]) { - arr = kvcalloc(instance->insn_cnt, sizeof(*arr), GFP_KERNEL_ACCOUNT); + arr = kvzalloc_objs(*arr, instance->insn_cnt, + GFP_KERNEL_ACCOUNT); instance->frames[frame] = arr; if (!arr) return ERR_PTR(-ENOMEM); diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index be66d7e520e0..1adeb4d3b8cf 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -683,9 +683,9 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) if (!key || key->prefixlen > trie->max_prefixlen) goto find_leftmost; - node_stack = kmalloc_array(trie->max_prefixlen + 1, - sizeof(struct lpm_trie_node *), - GFP_ATOMIC | __GFP_NOWARN); + node_stack = kmalloc_objs(struct lpm_trie_node *, + trie->max_prefixlen + 1, + GFP_ATOMIC | __GFP_NOWARN); if (!node_stack) return -ENOMEM; diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 8e88201c98bf..25f30f9edaef 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -494,7 +494,7 @@ int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog) if (IS_ERR(net)) return PTR_ERR(net); - net_link = kzalloc(sizeof(*net_link), GFP_USER); + net_link = kzalloc_obj(*net_link, GFP_USER); if (!net_link) { err = -ENOMEM; goto out_put_net; diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 227f9b5f388b..7fcbbe0ad925 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -72,7 +72,7 @@ static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct bpf_offload_netdev *ondev; int err; - ondev = 
kzalloc(sizeof(*ondev), GFP_KERNEL); + ondev = kzalloc_obj(*ondev, GFP_KERNEL); if (!ondev) return -ENOMEM; @@ -182,7 +182,7 @@ static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *n struct bpf_prog_offload *offload; int err; - offload = kzalloc(sizeof(*offload), GFP_USER); + offload = kzalloc_obj(*offload, GFP_USER); if (!offload) return -ENOMEM; @@ -777,7 +777,7 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) { struct bpf_offload_dev *offdev; - offdev = kzalloc(sizeof(*offdev), GFP_KERNEL); + offdev = kzalloc_obj(*offdev, GFP_KERNEL); if (!offdev) return ERR_PTR(-ENOMEM); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index dd89bf809772..2d14fb6d0ed0 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3633,7 +3633,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, if (prog->expected_attach_type == BPF_TRACE_FSESSION) { struct bpf_fsession_link *fslink; - fslink = kzalloc(sizeof(*fslink), GFP_USER); + fslink = kzalloc_obj(*fslink, GFP_USER); if (fslink) { bpf_link_init(&fslink->fexit.link, BPF_LINK_TYPE_TRACING, &bpf_tracing_link_lops, prog, attach_type); @@ -3643,7 +3643,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, link = NULL; } } else { - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); } if (!link) { err = -ENOMEM; @@ -4183,7 +4183,7 @@ static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *pro if (IS_ERR(perf_file)) return PTR_ERR(perf_file); - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); if (!link) { err = -ENOMEM; goto out_put_file; @@ -4261,7 +4261,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog, if (!btp) return -ENOENT; - link = kzalloc(sizeof(*link), GFP_USER); + link = kzalloc_obj(*link, GFP_USER); if (!link) { err = -ENOMEM; goto out_put_btp; @@ -6076,9 +6076,8 @@ static int bpf_prog_bind_map(union bpf_attr *attr) goto out_unlock; } - 
used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, - sizeof(used_maps_new[0]), - GFP_KERNEL); + used_maps_new = kmalloc_objs(used_maps_new[0], + prog->aux->used_map_cnt + 1, GFP_KERNEL); if (!used_maps_new) { ret = -ENOMEM; goto out_unlock; diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c index efd987ea6872..02db0113b8e7 100644 --- a/kernel/bpf/tcx.c +++ b/kernel/bpf/tcx.c @@ -321,7 +321,7 @@ int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) ret = -ENODEV; goto out; } - tcx = kzalloc(sizeof(*tcx), GFP_USER); + tcx = kzalloc_obj(*tcx, GFP_USER); if (!tcx) { ret = -ENOMEM; goto out; diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c index 7e4aa1e44b50..e85a179523f0 100644 --- a/kernel/bpf/token.c +++ b/kernel/bpf/token.c @@ -172,7 +172,7 @@ int bpf_token_create(union bpf_attr *attr) if (fdf.err) return fdf.err; - token = kzalloc(sizeof(*token), GFP_USER); + token = kzalloc_obj(*token, GFP_USER); if (!token) return -ENOMEM; diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 952cd7932461..b94565843f77 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -256,7 +256,7 @@ static int direct_ops_mod(struct bpf_trampoline *tr, void *addr, bool lock_direc */ static int direct_ops_alloc(struct bpf_trampoline *tr) { - tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL); + tr->fops = kzalloc_obj(struct ftrace_ops, GFP_KERNEL); if (!tr->fops) return -ENOMEM; tr->fops->private = tr; @@ -342,7 +342,7 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key, unsigned long ip) goto out; } } - tr = kzalloc(sizeof(*tr), GFP_KERNEL); + tr = kzalloc_obj(*tr, GFP_KERNEL); if (!tr) goto out; if (direct_ops_alloc(tr)) { @@ -446,7 +446,7 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a int kind; *total = 0; - tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); + tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX, GFP_KERNEL); if (!tlinks) return ERR_PTR(-ENOMEM); @@ -569,7 
+569,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size) void *image; int err = -ENOMEM; - im = kzalloc(sizeof(*im), GFP_KERNEL); + im = kzalloc_obj(*im, GFP_KERNEL); if (!im) goto out; @@ -928,7 +928,7 @@ static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog struct bpf_shim_tramp_link *shim_link = NULL; struct bpf_prog *p; - shim_link = kzalloc(sizeof(*shim_link), GFP_USER); + shim_link = kzalloc_obj(*shim_link, GFP_USER); if (!shim_link) return NULL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dbaafb64d3bd..63f05d90e708 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1779,7 +1779,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { - dst = kzalloc(sizeof(*dst), GFP_KERNEL_ACCOUNT); + dst = kzalloc_obj(*dst, GFP_KERNEL_ACCOUNT); if (!dst) return -ENOMEM; dst_state->frame[i] = dst; @@ -2127,7 +2127,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, struct bpf_verifier_stack_elem *elem; int err; - elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT); + elem = kzalloc_obj(struct bpf_verifier_stack_elem, GFP_KERNEL_ACCOUNT); if (!elem) return ERR_PTR(-ENOMEM); @@ -2949,7 +2949,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, struct bpf_verifier_stack_elem *elem; struct bpf_func_state *frame; - elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT); + elem = kzalloc_obj(struct bpf_verifier_stack_elem, GFP_KERNEL_ACCOUNT); if (!elem) return ERR_PTR(-ENOMEM); @@ -2972,7 +2972,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, */ elem->st.branches = 1; elem->st.in_sleepable = is_sleepable; - frame = kzalloc(sizeof(*frame), GFP_KERNEL_ACCOUNT); + frame = kzalloc_obj(*frame, GFP_KERNEL_ACCOUNT); if (!frame) return ERR_PTR(-ENOMEM); init_func_state(env, 
frame, @@ -3410,7 +3410,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) return -EINVAL; } - tab = kzalloc(sizeof(*tab), GFP_KERNEL_ACCOUNT); + tab = kzalloc_obj(*tab, GFP_KERNEL_ACCOUNT); if (!tab) return -ENOMEM; prog_aux->kfunc_tab = tab; @@ -3426,7 +3426,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) return 0; if (!btf_tab && offset) { - btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL_ACCOUNT); + btf_tab = kzalloc_obj(*btf_tab, GFP_KERNEL_ACCOUNT); if (!btf_tab) return -ENOMEM; prog_aux->kfunc_btf_tab = btf_tab; @@ -10580,7 +10580,7 @@ static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int calls } caller = state->frame[state->curframe]; - callee = kzalloc(sizeof(*callee), GFP_KERNEL_ACCOUNT); + callee = kzalloc_obj(*callee, GFP_KERNEL_ACCOUNT); if (!callee) return -ENOMEM; state->frame[state->curframe + 1] = callee; @@ -18860,11 +18860,13 @@ static int check_cfg(struct bpf_verifier_env *env) int *insn_stack, *insn_state; int ex_insn_beg, i, ret = 0; - insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT); + insn_state = env->cfg.insn_state = kvzalloc_objs(int, insn_cnt, + GFP_KERNEL_ACCOUNT); if (!insn_state) return -ENOMEM; - insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT); + insn_stack = env->cfg.insn_stack = kvzalloc_objs(int, insn_cnt, + GFP_KERNEL_ACCOUNT); if (!insn_stack) { kvfree(insn_state); return -ENOMEM; @@ -18951,9 +18953,9 @@ static int compute_postorder(struct bpf_verifier_env *env) int *stack = NULL, *postorder = NULL, *state = NULL; struct bpf_iarray *succ; - postorder = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); - state = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); - stack = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); + postorder = kvzalloc_objs(int, env->prog->len, GFP_KERNEL_ACCOUNT); + state = kvzalloc_objs(int, env->prog->len, 
GFP_KERNEL_ACCOUNT); + stack = kvzalloc_objs(int, env->prog->len, GFP_KERNEL_ACCOUNT); if (!postorder || !state || !stack) { kvfree(postorder); kvfree(state); @@ -19147,7 +19149,8 @@ static int check_btf_func(struct bpf_verifier_env *env, urecord = make_bpfptr(attr->func_info, uattr.is_kernel); krecord = prog->aux->func_info; - info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL_ACCOUNT | __GFP_NOWARN); + info_aux = kzalloc_objs(*info_aux, nfuncs, + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!info_aux) return -ENOMEM; @@ -19232,8 +19235,8 @@ static int check_btf_line(struct bpf_verifier_env *env, /* Need to zero it in case the userspace may * pass in a smaller bpf_line_info object. */ - linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), - GFP_KERNEL_ACCOUNT | __GFP_NOWARN); + linfo = kvzalloc_objs(struct bpf_line_info, nr_linfo, + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (!linfo) return -ENOMEM; @@ -20619,7 +20622,8 @@ hit: if (loop) { struct bpf_scc_backedge *backedge; - backedge = kzalloc(sizeof(*backedge), GFP_KERNEL_ACCOUNT); + backedge = kzalloc_obj(*backedge, + GFP_KERNEL_ACCOUNT); if (!backedge) return -ENOMEM; err = copy_verifier_state(&backedge->state, cur); @@ -20683,7 +20687,7 @@ miss: * When looping the sl->state.branches will be > 0 and this state * will not be considered for equivalence until branches == 0. 
*/ - new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL_ACCOUNT); + new_sl = kzalloc_obj(struct bpf_verifier_state_list, GFP_KERNEL_ACCOUNT); if (!new_sl) return -ENOMEM; env->total_states++; @@ -22765,7 +22769,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) goto out_undo_insn; err = -ENOMEM; - func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); + func = kzalloc_objs(prog, env->subprog_cnt, GFP_KERNEL); if (!func) goto out_undo_insn; @@ -24472,14 +24476,14 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog) env->prev_linfo = NULL; env->pass_cnt++; - state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL_ACCOUNT); + state = kzalloc_obj(struct bpf_verifier_state, GFP_KERNEL_ACCOUNT); if (!state) return -ENOMEM; state->curframe = 0; state->speculative = false; state->branches = 1; state->in_sleepable = env->prog->sleepable; - state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT); + state->frame[0] = kzalloc_obj(struct bpf_func_state, GFP_KERNEL_ACCOUNT); if (!state->frame[0]) { kfree(state); return -ENOMEM; @@ -25600,7 +25604,7 @@ static int compute_live_registers(struct bpf_verifier_env *env) * - repeat the computation while {in,out} fields changes for * any instruction. 
*/ - state = kvcalloc(insn_cnt, sizeof(*state), GFP_KERNEL_ACCOUNT); + state = kvzalloc_objs(*state, insn_cnt, GFP_KERNEL_ACCOUNT); if (!state) { err = -ENOMEM; goto out; @@ -25828,7 +25832,8 @@ dfs_continue: dfs_sz--; } } - env->scc_info = kvcalloc(next_scc_id, sizeof(*env->scc_info), GFP_KERNEL_ACCOUNT); + env->scc_info = kvzalloc_objs(*env->scc_info, next_scc_id, + GFP_KERNEL_ACCOUNT); if (!env->scc_info) { err = -ENOMEM; goto exit; @@ -25859,7 +25864,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ - env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL_ACCOUNT); + env = kvzalloc_obj(struct bpf_verifier_env, GFP_KERNEL_ACCOUNT); if (!env) return -ENOMEM; @@ -25923,9 +25928,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; - env->explored_states = kvcalloc(state_htab_size(env), - sizeof(struct list_head), - GFP_KERNEL_ACCOUNT); + env->explored_states = kvzalloc_objs(struct list_head, + state_htab_size(env), + GFP_KERNEL_ACCOUNT); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; @@ -26062,9 +26067,9 @@ skip_full_check: if (env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ - env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, - sizeof(env->used_maps[0]), - GFP_KERNEL_ACCOUNT); + env->prog->aux->used_maps = kmalloc_objs(env->used_maps[0], + env->used_map_cnt, + GFP_KERNEL_ACCOUNT); if (!env->prog->aux->used_maps) { ret = -ENOMEM; @@ -26077,9 +26082,9 @@ skip_full_check: } if (env->used_btf_cnt) { /* if program passed verifier, update used_btfs in bpf_prog_aux */ - env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, - sizeof(env->used_btfs[0]), - 
GFP_KERNEL_ACCOUNT); + env->prog->aux->used_btfs = kmalloc_objs(env->used_btfs[0], + env->used_btf_cnt, + GFP_KERNEL_ACCOUNT); if (!env->prog->aux->used_btfs) { ret = -ENOMEM; goto err_release_maps; diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 724950c4b690..0449b062dd1c 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -317,7 +317,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, return l; /* entry not found; create a new one */ - l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); + l = kzalloc_obj(struct cgroup_pidlist, GFP_KERNEL); if (!l) return l; @@ -352,7 +352,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, * show up until sometime later on. */ length = cgroup_task_count(cgrp); - array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL); + array = kvmalloc_objs(pid_t, length, GFP_KERNEL); if (!array) return -ENOMEM; /* now, populate the array */ @@ -1237,7 +1237,7 @@ static int cgroup1_root_to_use(struct fs_context *fc) if (ctx->ns != &init_cgroup_ns) return -EPERM; - root = kzalloc(sizeof(*root), GFP_KERNEL); + root = kzalloc_obj(*root, GFP_KERNEL); if (!root) return -ENOMEM; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 8af4351536cf..7d220276d019 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1168,7 +1168,7 @@ static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) INIT_LIST_HEAD(tmp_links); for (i = 0; i < count; i++) { - link = kzalloc(sizeof(*link), GFP_KERNEL); + link = kzalloc_obj(*link, GFP_KERNEL); if (!link) { free_cgrp_cset_links(tmp_links); return -ENOMEM; @@ -1241,7 +1241,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, if (cset) return cset; - cset = kzalloc(sizeof(*cset), GFP_KERNEL); + cset = kzalloc_obj(*cset, GFP_KERNEL); if (!cset) return NULL; @@ -2350,7 +2350,7 @@ static int cgroup_init_fs_context(struct fs_context *fc) { struct 
cgroup_fs_context *ctx; - ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL); + ctx = kzalloc_obj(struct cgroup_fs_context, GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -4251,7 +4251,7 @@ static int cgroup_file_open(struct kernfs_open_file *of) struct cgroup_file_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -5844,7 +5844,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name, int ret; /* allocate the cgroup and its ID, 0 is reserved for the root */ - cgrp = kzalloc(struct_size(cgrp, _low_ancestors, level), GFP_KERNEL); + cgrp = kzalloc_flex(*cgrp, _low_ancestors, level, GFP_KERNEL); if (!cgrp) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c index 7a23b9e8778f..8e7ffc205c3b 100644 --- a/kernel/cgroup/cpuset-v1.c +++ b/kernel/cgroup/cpuset-v1.c @@ -316,7 +316,7 @@ void cpuset1_hotplug_update_tasks(struct cpuset *cs, css_tryget_online(&cs->css)) { struct cpuset_remove_tasks_struct *s; - s = kzalloc(sizeof(*s), GFP_KERNEL); + s = kzalloc_obj(*s, GFP_KERNEL); if (WARN_ON_ONCE(!s)) { css_put(&cs->css); return; @@ -653,7 +653,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, if (!doms) goto done; - dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); + dattr = kmalloc_obj(struct sched_domain_attr, GFP_KERNEL); if (dattr) { *dattr = SD_ATTR_INIT; update_domain_attr_tree(dattr, &top_cpuset); @@ -664,7 +664,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, goto done; } - csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); + csa = kmalloc_objs(cp, nr_cpusets(), GFP_KERNEL); if (!csa) goto done; csn = 0; @@ -727,8 +727,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, * The rest of the code, including the scheduler, can deal with * dattr==NULL case. No need to abort if alloc fails. 
*/ - dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), - GFP_KERNEL); + dattr = kmalloc_objs(struct sched_domain_attr, ndoms, GFP_KERNEL); for (nslot = 0, i = 0; i < csn; i++) { nslot_update = 0; diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 7607dfe516e6..384d9d6e323b 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -536,7 +536,7 @@ static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs) /* Allocate base structure */ trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) : - kzalloc(sizeof(*cs), GFP_KERNEL); + kzalloc_obj(*cs, GFP_KERNEL); if (!trial) return NULL; @@ -791,7 +791,7 @@ static int generate_sched_domains(cpumask_var_t **domains, goto generate_doms; } - csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); + csa = kmalloc_objs(cp, nr_cpusets(), GFP_KERNEL); if (!csa) goto done; @@ -835,8 +835,7 @@ generate_doms: * The rest of the code, including the scheduler, can deal with * dattr==NULL case. No need to abort if alloc fails. 
*/ - dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), - GFP_KERNEL); + dattr = kmalloc_objs(struct sched_domain_attr, ndoms, GFP_KERNEL); /* * Cgroup v2 doesn't support domain attributes, just set all of them @@ -2479,7 +2478,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, return; } - mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); + mwork = kzalloc_obj(*mwork, GFP_KERNEL); if (mwork) { mwork->mm = mm; mwork->from = *from; @@ -2501,7 +2500,7 @@ static void schedule_flush_migrate_mm(void) { struct callback_head *flush_cb; - flush_cb = kzalloc(sizeof(struct callback_head), GFP_KERNEL); + flush_cb = kzalloc_obj(struct callback_head, GFP_KERNEL); if (!flush_cb) return; diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c index a5490097fe52..78429dd9e9c6 100644 --- a/kernel/cgroup/debug.c +++ b/kernel/cgroup/debug.c @@ -14,7 +14,7 @@ static struct cgroup_subsys_state * debug_css_alloc(struct cgroup_subsys_state *parent_css) { - struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); + struct cgroup_subsys_state *css = kzalloc_obj(*css, GFP_KERNEL); if (!css) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c index 1ea6afffa985..0c8c0a135231 100644 --- a/kernel/cgroup/dmem.c +++ b/kernel/cgroup/dmem.c @@ -222,7 +222,7 @@ static void dmemcs_free(struct cgroup_subsys_state *css) static struct cgroup_subsys_state * dmemcs_alloc(struct cgroup_subsys_state *parent_css) { - struct dmemcg_state *dmemcs = kzalloc(sizeof(*dmemcs), GFP_KERNEL); + struct dmemcg_state *dmemcs = kzalloc_obj(*dmemcs, GFP_KERNEL); if (!dmemcs) return ERR_PTR(-ENOMEM); @@ -359,7 +359,7 @@ alloc_pool_single(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region struct dmem_cgroup_pool_state *pool, *ppool = NULL; if (!*allocpool) { - pool = kzalloc(sizeof(*pool), GFP_NOWAIT); + pool = kzalloc_obj(*pool, GFP_NOWAIT); if (!pool) return ERR_PTR(-ENOMEM); } else { @@ -521,7 +521,7 @@ struct dmem_cgroup_region 
*dmem_cgroup_register_region(u64 size, const char *fmt if (!region_name) return ERR_PTR(-ENOMEM); - ret = kzalloc(sizeof(*ret), GFP_KERNEL); + ret = kzalloc_obj(*ret, GFP_KERNEL); if (!ret) { kfree(region_name); return ERR_PTR(-ENOMEM); @@ -597,7 +597,7 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) if (WARN_ON(allocpool)) continue; - allocpool = kzalloc(sizeof(*allocpool), GFP_KERNEL); + allocpool = kzalloc_obj(*allocpool, GFP_KERNEL); if (allocpool) { pool = NULL; continue; diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c index 817c33450fee..85344b107873 100644 --- a/kernel/cgroup/legacy_freezer.c +++ b/kernel/cgroup/legacy_freezer.c @@ -81,7 +81,7 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css) { struct freezer *freezer; - freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL); + freezer = kzalloc_obj(struct freezer, GFP_KERNEL); if (!freezer) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c index 6a01d91ea4cb..7c3ae3a76c8d 100644 --- a/kernel/cgroup/misc.c +++ b/kernel/cgroup/misc.c @@ -445,7 +445,7 @@ misc_cg_alloc(struct cgroup_subsys_state *parent_css) if (!parent_css) { cg = &root_cg; } else { - cg = kzalloc(sizeof(*cg), GFP_KERNEL); + cg = kzalloc_obj(*cg, GFP_KERNEL); if (!cg) return ERR_PTR(-ENOMEM); } diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c index db9617556dd7..ea4ee13936be 100644 --- a/kernel/cgroup/namespace.c +++ b/kernel/cgroup/namespace.c @@ -24,7 +24,7 @@ static struct cgroup_namespace *alloc_cgroup_ns(void) struct cgroup_namespace *new_ns __free(kfree) = NULL; int ret; - new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL_ACCOUNT); + new_ns = kzalloc_obj(struct cgroup_namespace, GFP_KERNEL_ACCOUNT); if (!new_ns) return ERR_PTR(-ENOMEM); ret = ns_common_init(new_ns); diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index 8f61114c36dd..6221573fc6ad 100644 --- a/kernel/cgroup/pids.c +++ 
b/kernel/cgroup/pids.c @@ -80,7 +80,7 @@ pids_css_alloc(struct cgroup_subsys_state *parent) { struct pids_cgroup *pids; - pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL); + pids = kzalloc_obj(struct pids_cgroup, GFP_KERNEL); if (!pids) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c index ef5878fb2005..9d3693574b11 100644 --- a/kernel/cgroup/rdma.c +++ b/kernel/cgroup/rdma.c @@ -134,7 +134,7 @@ get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device) if (rpool) return rpool; - rpool = kzalloc(sizeof(*rpool), GFP_KERNEL); + rpool = kzalloc_obj(*rpool, GFP_KERNEL); if (!rpool) return ERR_PTR(-ENOMEM); @@ -443,7 +443,7 @@ static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of, goto err; } - new_limits = kcalloc(RDMACG_RESOURCE_MAX, sizeof(int), GFP_KERNEL); + new_limits = kzalloc_objs(int, RDMACG_RESOURCE_MAX, GFP_KERNEL); if (!new_limits) { ret = -ENOMEM; goto err; @@ -566,7 +566,7 @@ rdmacg_css_alloc(struct cgroup_subsys_state *parent) { struct rdma_cgroup *cg; - cg = kzalloc(sizeof(*cg), GFP_KERNEL); + cg = kzalloc_obj(*cg, GFP_KERNEL); if (!cg) return ERR_PTR(-ENOMEM); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 3952b3e102e0..2146ca0f0ed8 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -368,7 +368,7 @@ static int __crash_shrink_memory(struct resource *old_res, { struct resource *ram_res; - ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL); + ram_res = kzalloc_obj(*ram_res, GFP_KERNEL); if (!ram_res) return -ENOMEM; diff --git a/kernel/crash_dump_dm_crypt.c b/kernel/crash_dump_dm_crypt.c index 37129243054d..13191d7c7a32 100644 --- a/kernel/crash_dump_dm_crypt.c +++ b/kernel/crash_dump_dm_crypt.c @@ -252,7 +252,7 @@ static struct config_item *config_keys_make_item(struct config_group *group, return ERR_PTR(-EINVAL); } - config_key = kzalloc(sizeof(struct config_key), GFP_KERNEL); + config_key = kzalloc_obj(struct config_key, GFP_KERNEL); if (!config_key) return 
ERR_PTR(-ENOMEM); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 314787fb8ce7..ddce56b47b25 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -661,7 +661,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) return 0; } - kms = kmalloc(sizeof(*kms), GFP_KDB); + kms = kmalloc_obj(*kms, GFP_KDB); if (!kms) { kdb_printf("Could not allocate new kdb macro command: %s\n", cmdstr); @@ -707,7 +707,7 @@ static int kdb_defcmd(int argc, const char **argv) kdb_printf("Command only available during kdb_init()\n"); return KDB_NOTIMP; } - kdb_macro = kzalloc(sizeof(*kdb_macro), GFP_KDB); + kdb_macro = kzalloc_obj(*kdb_macro, GFP_KDB); if (!kdb_macro) goto fail_defcmd; diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 77c8d9487a9a..d580ab6d2e33 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -49,7 +49,7 @@ static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, if (!mem_base) return ERR_PTR(-EINVAL); - dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); + dma_mem = kzalloc_obj(struct dma_coherent_mem, GFP_KERNEL); if (!dma_mem) goto out_unmap_membase; dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 43d6a996d7a7..3be263d7afd6 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -900,7 +900,7 @@ void dma_debug_add_bus(const struct bus_type *bus) if (dma_debug_disabled()) return; - nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); + nb = kzalloc_obj(struct notifier_block, GFP_KERNEL); if (nb == NULL) { pr_err("dma_debug_add_bus: out of memory\n"); return; diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index c9fa983990cd..280ec952c5e1 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -654,7 +654,7 @@ int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, if (!offset) return 0; - map = kcalloc(2, sizeof(*map), GFP_KERNEL); + map = 
kzalloc_objs(*map, 2, GFP_KERNEL); if (!map) return -ENOMEM; map[0].cpu_start = cpu_start; diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index 794041a39e65..48ab3d957960 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -121,7 +121,7 @@ static int do_map_benchmark(struct map_benchmark_data *map) int ret = 0; int i; - tsk = kmalloc_array(threads, sizeof(*tsk), GFP_KERNEL); + tsk = kmalloc_objs(*tsk, threads, GFP_KERNEL); if (!tsk) return -ENOMEM; diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index ee29c47781e3..3928a509c44c 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -768,7 +768,7 @@ static struct sg_table *alloc_single_sgt(struct device *dev, size_t size, struct sg_table *sgt; struct page *page; - sgt = kmalloc(sizeof(*sgt), gfp); + sgt = kmalloc_obj(*sgt, gfp); if (!sgt) return NULL; if (sg_alloc_table(sgt, 1, gfp)) diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index b7c1c0c92d0c..b53e66417e5f 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -45,7 +45,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i; - pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_objs(struct page *, count, GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < count; i++) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index a547c7693135..cb8efc059e6a 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1809,19 +1809,18 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, if (!mem) { struct io_tlb_pool *pool; - mem = kzalloc(sizeof(*mem), GFP_KERNEL); + mem = kzalloc_obj(*mem, GFP_KERNEL); if (!mem) return -ENOMEM; pool = &mem->defpool; - pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL); + pool->slots = kzalloc_objs(*pool->slots, nslabs, GFP_KERNEL); if (!pool->slots) { kfree(mem); return -ENOMEM; } - pool->areas = kcalloc(nareas, sizeof(*pool->areas), - GFP_KERNEL); + 
pool->areas = kzalloc_objs(*pool->areas, nareas, GFP_KERNEL); if (!pool->areas) { kfree(pool->slots); kfree(mem); diff --git a/kernel/events/core.c b/kernel/events/core.c index e18119f30c29..33c84a605799 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5058,7 +5058,7 @@ alloc_perf_context(struct task_struct *task) { struct perf_event_context *ctx; - ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); + ctx = kzalloc_obj(struct perf_event_context, GFP_KERNEL); if (!ctx) return NULL; @@ -5198,7 +5198,7 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, return epc; } - new = kzalloc(sizeof(*epc), GFP_KERNEL); + new = kzalloc_obj(*epc, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); @@ -5374,7 +5374,7 @@ alloc_perf_ctx_data(struct kmem_cache *ctx_cache, bool global) { struct perf_ctx_data *cd; - cd = kzalloc(sizeof(*cd), GFP_KERNEL); + cd = kzalloc_obj(*cd, GFP_KERNEL); if (!cd) return NULL; @@ -11111,7 +11111,7 @@ static int swevent_hlist_get_cpu(int cpu) cpumask_test_cpu(cpu, perf_online_mask)) { struct swevent_hlist *hlist; - hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); + hlist = kzalloc_obj(*hlist, GFP_KERNEL); if (!hlist) { err = -ENOMEM; goto exit; @@ -12634,7 +12634,7 @@ static int pmu_dev_alloc(struct pmu *pmu) { int ret = -ENOMEM; - pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); + pmu->dev = kzalloc_obj(struct device, GFP_KERNEL); if (!pmu->dev) goto out; @@ -15269,7 +15269,7 @@ perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct perf_cgroup *jc; - jc = kzalloc(sizeof(*jc), GFP_KERNEL); + jc = kzalloc_obj(*jc, GFP_KERNEL); if (!jc) return ERR_PTR(-ENOMEM); diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 8ec2cb688903..6c44fbdcfa4d 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -185,7 +185,8 @@ static inline int hw_breakpoint_slots_cached(int type) static __init bool bp_slots_histogram_alloc(struct bp_slots_histogram 
*hist, enum bp_type_idx type) { - hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL); + hist->count = kzalloc_objs(*hist->count, + hw_breakpoint_slots_cached(type), GFP_KERNEL); return hist->count; } diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 424ef2235b07..d39dcc19d21e 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -238,7 +238,7 @@ static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) if (delayed_uprobe_check(uprobe, mm)) return 0; - du = kzalloc(sizeof(*du), GFP_KERNEL); + du = kzalloc_obj(*du, GFP_KERNEL); if (!du) return -ENOMEM; @@ -994,7 +994,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, { struct uprobe *uprobe, *cur_uprobe; - uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); + uprobe = kzalloc_obj(struct uprobe, GFP_KERNEL); if (!uprobe) return ERR_PTR(-ENOMEM); @@ -1219,8 +1219,8 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through * reclaim. This is optimistic, no harm done if it fails. 
*/ - prev = kmalloc(sizeof(struct map_info), - GFP_NOWAIT | __GFP_NOMEMALLOC); + prev = kmalloc_obj(struct map_info, + GFP_NOWAIT | __GFP_NOMEMALLOC); if (prev) prev->next = NULL; } @@ -1252,7 +1252,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) } do { - info = kmalloc(sizeof(struct map_info), GFP_KERNEL); + info = kmalloc_obj(struct map_info, GFP_KERNEL); if (!info) { curr = ERR_PTR(-ENOMEM); goto out; @@ -1755,7 +1755,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) struct xol_area *area; void *insns; - area = kzalloc(sizeof(*area), GFP_KERNEL); + area = kzalloc_obj(*area, GFP_KERNEL); if (unlikely(!area)) goto out; @@ -2069,7 +2069,7 @@ static struct uprobe_task *alloc_utask(void) { struct uprobe_task *utask; - utask = kzalloc(sizeof(*utask), GFP_KERNEL); + utask = kzalloc_obj(*utask, GFP_KERNEL); if (!utask) return NULL; @@ -2102,7 +2102,7 @@ static struct return_instance *alloc_return_instance(struct uprobe_task *utask) if (ri) return ri; - ri = kzalloc(sizeof(*ri), GFP_KERNEL); + ri = kzalloc_obj(*ri, GFP_KERNEL); if (!ri) return ZERO_SIZE_PTR; diff --git a/kernel/fail_function.c b/kernel/fail_function.c index d971a0189319..18993fcbdbda 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -57,7 +57,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr) { struct fei_attr *attr; - attr = kzalloc(sizeof(*attr), GFP_KERNEL); + attr = kzalloc_obj(*attr, GFP_KERNEL); if (attr) { attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL); if (!attr->kp.symbol_name) { diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c index dacb2330f1fb..a73b6c713d83 100644 --- a/kernel/futex/pi.c +++ b/kernel/futex/pi.c @@ -17,7 +17,7 @@ int refill_pi_state_cache(void) if (likely(current->pi_state_cache)) return 0; - pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL); + pi_state = kzalloc_obj(*pi_state, GFP_KERNEL); if (!pi_state) return -ENOMEM; diff --git a/kernel/futex/syscalls.c 
b/kernel/futex/syscalls.c index 880c9bf2f315..aec0495adabe 100644 --- a/kernel/futex/syscalls.c +++ b/kernel/futex/syscalls.c @@ -333,7 +333,7 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters, if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; - futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL); + futexv = kzalloc_objs(*futexv, nr_futexes, GFP_KERNEL); if (!futexv) { ret = -ENOMEM; goto destroy_timer; diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index 8b888a6193cc..4cfdeb2c9dc2 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -81,7 +81,7 @@ static LIST_HEAD(clang_gcov_list); void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush) { - struct gcov_info *info = kzalloc(sizeof(*info), GFP_KERNEL); + struct gcov_info *info = kzalloc_obj(*info, GFP_KERNEL); if (!info) return; @@ -112,7 +112,7 @@ EXPORT_SYMBOL(llvm_gcda_start_file); void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum) { - struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL); + struct gcov_fn_info *info = kzalloc_obj(*info, GFP_KERNEL); if (!info) return; diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 01520689b57c..8430f5cd21b6 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -116,7 +116,7 @@ static struct gcov_iterator *gcov_iter_new(struct gcov_info *info) /* Dry-run to get the actual buffer size. */ size = convert_to_gcda(NULL, info); - iter = kvmalloc(struct_size(iter, buffer, size), GFP_KERNEL); + iter = kvmalloc_flex(*iter, buffer, size, GFP_KERNEL); if (!iter) return NULL; @@ -482,7 +482,7 @@ static void add_links(struct gcov_node *node, struct dentry *parent) for (num = 0; gcov_link[num].ext; num++) /* Nothing. 
*/; - node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); + node->links = kzalloc_objs(struct dentry *, num, GFP_KERNEL); if (!node->links) return; for (i = 0; i < num; i++) { @@ -545,8 +545,8 @@ static struct gcov_node *new_node(struct gcov_node *parent, if (!node) goto err_nomem; if (info) { - node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), - GFP_KERNEL); + node->loaded_info = kzalloc_objs(struct gcov_info *, 1, + GFP_KERNEL); if (!node->loaded_info) goto err_nomem; } @@ -731,7 +731,7 @@ static void add_info(struct gcov_node *node, struct gcov_info *info) * case the new data set is incompatible, the node only contains * unloaded data sets and there's not enough memory for the array. */ - loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); + loaded_info = kzalloc_objs(struct gcov_info *, num + 1, GFP_KERNEL); if (!loaded_info) { pr_warn("could not add '%s' (out of memory)\n", gcov_info_filename(info)); diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ffde93d051a4..46dbba7b0efd 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -298,8 +298,8 @@ struct gcov_info *gcov_info_dup(struct gcov_info *info) if (!dup->filename) goto err_free; - dup->functions = kcalloc(info->n_functions, - sizeof(struct gcov_fn_info *), GFP_KERNEL); + dup->functions = kzalloc_objs(struct gcov_fn_info *, info->n_functions, + GFP_KERNEL); if (!dup->functions) goto err_free; diff --git a/kernel/groups.c b/kernel/groups.c index 9b43da22647d..b5e3be6a6b1f 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -15,7 +15,7 @@ struct group_info *groups_alloc(int gidsetsize) { struct group_info *gi; - gi = kvmalloc(struct_size(gi, gid, gidsetsize), GFP_KERNEL_ACCOUNT); + gi = kvmalloc_flex(*gi, gid, gidsetsize, GFP_KERNEL_ACCOUNT); if (!gi) return NULL; diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4013e6ad2b2f..cf6729888ee3 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -56,7 +56,7 @@ 
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd) if (!affvecs) return NULL; - masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); + masks = kzalloc_objs(*masks, nvecs, GFP_KERNEL); if (!masks) return NULL; diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index 3cd0c40282c0..da1da1a4b2d0 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -240,7 +240,7 @@ irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, { struct irq_chip_generic *gc; - gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL); + gc = kzalloc_flex(*gc, chip_types, num_ct, GFP_KERNEL); if (gc) { irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base, handler); diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index ae4c9cbd1b4b..59b84a10465c 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -148,7 +148,7 @@ static int irq_sim_domain_map(struct irq_domain *domain, struct irq_sim_work_ctx *work_ctx = domain->host_data; struct irq_sim_irq_ctx *irq_ctx; - irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL); + irq_ctx = kzalloc_obj(*irq_ctx, GFP_KERNEL); if (!irq_ctx) return -ENOMEM; @@ -202,7 +202,7 @@ struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode, void *data) { struct irq_sim_work_ctx *work_ctx __free(kfree) = - kzalloc(sizeof(*work_ctx), GFP_KERNEL); + kzalloc_obj(*work_ctx, GFP_KERNEL); if (!work_ctx) return ERR_PTR(-ENOMEM); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 022b3741dd7a..ddc9d01b3091 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -969,7 +969,7 @@ int irq_set_percpu_devid(unsigned int irq) if (!desc || desc->percpu_enabled) return -EINVAL; - desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); + desc->percpu_enabled = kzalloc_obj(*desc->percpu_enabled, GFP_KERNEL); if (!desc->percpu_enabled) return -ENOMEM; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 
c2258b133939..857fcd74ebda 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -92,7 +92,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, struct irqchip_fwid *fwid; char *n; - fwid = kzalloc(sizeof(*fwid), GFP_KERNEL); + fwid = kzalloc_obj(*fwid, GFP_KERNEL); switch (type) { case IRQCHIP_FWNODE_NAMED: diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index cded3d960eb7..2b05c45be1b3 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1332,7 +1332,7 @@ static int irq_setup_forced_threading(struct irqaction *new) */ if (new->handler && new->thread_fn) { /* Allocate the secondary action */ - new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + new->secondary = kzalloc_obj(struct irqaction, GFP_KERNEL); if (!new->secondary) return -ENOMEM; new->secondary->handler = irq_forced_secondary_handler; @@ -2156,7 +2156,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, handler = irq_default_primary_handler; } - action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + action = kzalloc_obj(struct irqaction, GFP_KERNEL); if (!action) return -ENOMEM; @@ -2486,7 +2486,7 @@ struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long f if (!affinity) affinity = cpu_possible_mask; - action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + action = kzalloc_obj(struct irqaction, GFP_KERNEL); if (!action) return NULL; diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index a50f2305a8dc..8151c14ca35a 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -51,7 +51,7 @@ __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits); struct irq_matrix *m; - m = kzalloc(struct_size(m, scratch_map, matrix_size * 2), GFP_KERNEL); + m = kzalloc_flex(*m, scratch_map, matrix_size * 2, GFP_KERNEL); if (!m) return NULL; diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 68886881fe10..e4bae8f1c414 100644 
--- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -76,7 +76,7 @@ static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec, const struct irq_affinity_desc *affinity) { - struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); + struct msi_desc *desc = kzalloc_obj(*desc, GFP_KERNEL); if (!desc) return NULL; @@ -530,7 +530,7 @@ static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) struct device_attribute *attrs; int ret, i; - attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL); + attrs = kzalloc_objs(*attrs, desc->nvec_used, GFP_KERNEL); if (!attrs) return -ENOMEM; diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c index 2b082a7e24a2..d2aeb6b7c393 100644 --- a/kernel/kallsyms_selftest.c +++ b/kernel/kallsyms_selftest.c @@ -264,7 +264,7 @@ static int test_kallsyms_basic_function(void) char namebuf[KSYM_NAME_LEN]; struct test_stat *stat, *stat2; - stat = kmalloc_array(2, sizeof(*stat), GFP_KERNEL); + stat = kmalloc_objs(*stat, 2, GFP_KERNEL); if (!stat) return -ENOMEM; stat2 = stat + 1; diff --git a/kernel/kcov.c b/kernel/kcov.c index 5397d0c14127..b9d4db7ea439 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -122,7 +122,7 @@ static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle) if (kcov_remote_find(handle)) return ERR_PTR(-EEXIST); - remote = kmalloc(sizeof(*remote), GFP_ATOMIC); + remote = kmalloc_obj(*remote, GFP_ATOMIC); if (!remote) return ERR_PTR(-ENOMEM); remote->handle = handle; @@ -527,7 +527,7 @@ static int kcov_open(struct inode *inode, struct file *filep) { struct kcov *kcov; - kcov = kzalloc(sizeof(*kcov), GFP_KERNEL); + kcov = kzalloc_obj(*kcov, GFP_KERNEL); if (!kcov) return -ENOMEM; guard(spinlock_init)(&kcov->lock); diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 8ef8167be745..edb062fb43b4 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ 
-168,7 +168,7 @@ static bool __report_matches(const struct expect_report *r) if (!report_available()) return false; - expect = kmalloc(sizeof(observed.lines), GFP_KERNEL); + expect = kmalloc_obj(observed.lines, GFP_KERNEL); if (WARN_ON(!expect)) return false; @@ -1538,7 +1538,7 @@ static int test_init(struct kunit *test) if (WARN_ON(!nthreads)) goto err; - threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL); + threads = kzalloc_objs(struct task_struct *, nthreads + 1, GFP_KERNEL); if (WARN_ON(!threads)) goto err; diff --git a/kernel/kexec.c b/kernel/kexec.c index 28008e3d462e..3902e7bb99fe 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -284,8 +284,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) return -EINVAL; - ksegments = kmalloc_array(nr_segments, sizeof(ksegments[0]), - GFP_KERNEL); + ksegments = kmalloc_objs(ksegments[0], nr_segments, GFP_KERNEL); if (!ksegments) return -ENOMEM; diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 95c585c6ddc3..76e4287a4f1d 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -231,7 +231,7 @@ struct kimage *do_kimage_alloc_init(void) struct kimage *image; /* Allocate a controlling structure */ - image = kzalloc(sizeof(*image), GFP_KERNEL); + image = kzalloc_obj(*image, GFP_KERNEL); if (!image) return NULL; @@ -975,7 +975,7 @@ void *kimage_map_segment(struct kimage *image, int idx) * Collect the source pages and map them in a contiguous VA range. 
*/ npages = PFN_UP(eaddr) - PFN_DOWN(addr); - src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL); + src_pages = kmalloc_objs(*src_pages, npages, GFP_KERNEL); if (!src_pages) { pr_err("Could not allocate ima pages array.\n"); return NULL; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e2cd01cf5968..b6744137b11e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -172,7 +172,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) } while (c->nr_garbage && collect_garbage_slots(c) == 0); /* All out of space. Need to allocate a new page. */ - kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL); + kip = kmalloc_flex(*kip, slot_used, slots_per_page(c), GFP_KERNEL); if (!kip) return NULL; @@ -900,7 +900,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { struct optimized_kprobe *op; - op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); + op = kzalloc_obj(struct optimized_kprobe, GFP_KERNEL); if (!op) return NULL; @@ -1117,7 +1117,7 @@ static void free_aggr_kprobe(struct kprobe *p) static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { - return kzalloc(sizeof(struct kprobe), GFP_KERNEL); + return kzalloc_obj(struct kprobe, GFP_KERNEL); } #endif /* CONFIG_OPTPROBES */ @@ -2295,7 +2295,7 @@ int register_kretprobe(struct kretprobe *rp) rp->rh = NULL; } #else /* !CONFIG_KRETPROBE_ON_RETHOOK */ - rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); + rp->rph = kzalloc_obj(struct kretprobe_holder, GFP_KERNEL); if (!rp->rph) return -ENOMEM; @@ -2499,7 +2499,7 @@ int kprobe_add_ksym_blacklist(unsigned long entry) !kallsyms_lookup_size_offset(entry, &size, &offset)) return -EINVAL; - ent = kmalloc(sizeof(*ent), GFP_KERNEL); + ent = kmalloc_obj(*ent, GFP_KERNEL); if (!ent) return -ENOMEM; ent->start_addr = entry; diff --git a/kernel/kthread.c b/kernel/kthread.c index c9507689e181..0b4f7328096f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -122,7 +122,7 @@ bool 
set_kthread_struct(struct task_struct *p) if (WARN_ON_ONCE(to_kthread(p))) return false; - kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); + kthread = kzalloc_obj(*kthread, GFP_KERNEL); if (!kthread) return false; @@ -511,8 +511,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), { DECLARE_COMPLETION_ONSTACK(done); struct task_struct *task; - struct kthread_create_info *create = kmalloc(sizeof(*create), - GFP_KERNEL); + struct kthread_create_info *create = kmalloc_obj(*create, GFP_KERNEL); if (!create) return ERR_PTR(-ENOMEM); @@ -1084,7 +1083,7 @@ __kthread_create_worker_on_node(unsigned int flags, int node, struct kthread_worker *worker; struct task_struct *task; - worker = kzalloc(sizeof(*worker), GFP_KERNEL); + worker = kzalloc_obj(*worker, GFP_KERNEL); if (!worker) return ERR_PTR(-ENOMEM); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 1acbad2dbfdf..0d52e48918eb 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -525,7 +525,7 @@ static struct klp_object *klp_alloc_object_dynamic(const char *name, { struct klp_object *obj; - obj = kzalloc(sizeof(*obj), GFP_KERNEL); + obj = kzalloc_obj(*obj, GFP_KERNEL); if (!obj) return NULL; @@ -554,7 +554,7 @@ static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func, { struct klp_func *func; - func = kzalloc(sizeof(*func), GFP_KERNEL); + func = kzalloc_obj(*func, GFP_KERNEL); if (!func) return NULL; diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 90408500e5a3..1149840cd538 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -179,7 +179,7 @@ static int klp_patch_func(struct klp_func *func) return -EINVAL; } - ops = kzalloc(sizeof(*ops), GFP_KERNEL); + ops = kzalloc_obj(*ops, GFP_KERNEL); if (!ops) return -ENOMEM; diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c index 95601623b4d6..23d76678d233 100644 --- a/kernel/liveupdate/kexec_handover.c +++ 
b/kernel/liveupdate/kexec_handover.c @@ -187,7 +187,7 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn, if (!physxa) { int err; - new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL); + new_physxa = kzalloc_obj(*physxa, GFP_KERNEL); if (!new_physxa) return -ENOMEM; @@ -1090,7 +1090,7 @@ void *kho_restore_vmalloc(const struct kho_vmalloc *preservation) return NULL; total_pages = preservation->total_pages; - pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL); + pages = kvmalloc_objs(*pages, total_pages, GFP_KERNEL); if (!pages) return NULL; order = preservation->order; diff --git a/kernel/liveupdate/kexec_handover_debugfs.c b/kernel/liveupdate/kexec_handover_debugfs.c index 2abbf62ba942..d42fc940d14d 100644 --- a/kernel/liveupdate/kexec_handover_debugfs.c +++ b/kernel/liveupdate/kexec_handover_debugfs.c @@ -29,7 +29,7 @@ static int __kho_debugfs_fdt_add(struct list_head *list, struct dentry *dir, struct fdt_debugfs *f; struct dentry *file; - f = kmalloc(sizeof(*f), GFP_KERNEL); + f = kmalloc_obj(*f, GFP_KERNEL); if (!f) return -ENOMEM; diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c index 4c7df52a6507..ca96edb3b4e5 100644 --- a/kernel/liveupdate/luo_file.c +++ b/kernel/liveupdate/luo_file.c @@ -289,7 +289,7 @@ int luo_preserve_file(struct luo_file_set *file_set, u64 token, int fd) if (err) goto err_free_files_mem; - luo_file = kzalloc(sizeof(*luo_file), GFP_KERNEL); + luo_file = kzalloc_obj(*luo_file, GFP_KERNEL); if (!luo_file) { err = -ENOMEM; goto err_flb_unpreserve; @@ -780,7 +780,7 @@ int luo_file_deserialize(struct luo_file_set *file_set, return -ENOENT; } - luo_file = kzalloc(sizeof(*luo_file), GFP_KERNEL); + luo_file = kzalloc_obj(*luo_file, GFP_KERNEL); if (!luo_file) return -ENOMEM; diff --git a/kernel/liveupdate/luo_flb.c b/kernel/liveupdate/luo_flb.c index 4c437de5c0b0..5f2cdf9caa7b 100644 --- a/kernel/liveupdate/luo_flb.c +++ b/kernel/liveupdate/luo_flb.c @@ -343,7 +343,7 @@ int 
liveupdate_register_flb(struct liveupdate_file_handler *fh, if (WARN_ON(list_empty(&ACCESS_PRIVATE(fh, list)))) return -EINVAL; - link = kzalloc(sizeof(*link), GFP_KERNEL); + link = kzalloc_obj(*link, GFP_KERNEL); if (!link) return -ENOMEM; diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c index dbdbc3bd7929..c0262ca00533 100644 --- a/kernel/liveupdate/luo_session.c +++ b/kernel/liveupdate/luo_session.c @@ -119,7 +119,7 @@ static struct luo_session_global luo_session_global = { static struct luo_session *luo_session_alloc(const char *name) { - struct luo_session *session = kzalloc(sizeof(*session), GFP_KERNEL); + struct luo_session *session = kzalloc_obj(*session, GFP_KERNEL); if (!session) return ERR_PTR(-ENOMEM); diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 6567e5eeacc0..96a8647a0074 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -610,9 +610,8 @@ static void torture_ww_mutex_init(void) ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class); ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class); - ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress, - sizeof(*ww_acquire_ctxs), - GFP_KERNEL); + ww_acquire_ctxs = kmalloc_objs(*ww_acquire_ctxs, + cxt.nrealwriters_stress, GFP_KERNEL); if (!ww_acquire_ctxs) VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory"); } @@ -1129,7 +1128,8 @@ static int call_rcu_chain_init(void) if (call_rcu_chains <= 0) return 0; - call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL); + call_rcu_chain_list = kzalloc_objs(*call_rcu_chain_list, + call_rcu_chains, GFP_KERNEL); if (!call_rcu_chain_list) return -ENOMEM; for (i = 0; i < call_rcu_chains; i++) { @@ -1293,9 +1293,8 @@ static int __init lock_torture_init(void) /* Initialize the statistics so that each run gets its own numbers. 
*/ if (nwriters_stress) { lock_is_write_held = false; - cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress, - sizeof(*cxt.lwsa), - GFP_KERNEL); + cxt.lwsa = kmalloc_objs(*cxt.lwsa, cxt.nrealwriters_stress, + GFP_KERNEL); if (cxt.lwsa == NULL) { VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); firsterr = -ENOMEM; @@ -1323,9 +1322,9 @@ static int __init lock_torture_init(void) } if (nreaders_stress) { - cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress, - sizeof(*cxt.lrsa), - GFP_KERNEL); + cxt.lrsa = kmalloc_objs(*cxt.lrsa, + cxt.nrealreaders_stress, + GFP_KERNEL); if (cxt.lrsa == NULL) { VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); firsterr = -ENOMEM; @@ -1372,9 +1371,8 @@ static int __init lock_torture_init(void) } if (nwriters_stress) { - writer_tasks = kcalloc(cxt.nrealwriters_stress, - sizeof(writer_tasks[0]), - GFP_KERNEL); + writer_tasks = kzalloc_objs(writer_tasks[0], + cxt.nrealwriters_stress, GFP_KERNEL); if (writer_tasks == NULL) { TOROUT_ERRSTRING("writer_tasks: Out of memory"); firsterr = -ENOMEM; @@ -1387,9 +1385,8 @@ static int __init lock_torture_init(void) nested_locks = MAX_NESTED_LOCKS; if (cxt.cur_ops->readlock) { - reader_tasks = kcalloc(cxt.nrealreaders_stress, - sizeof(reader_tasks[0]), - GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], + cxt.nrealreaders_stress, GFP_KERNEL); if (reader_tasks == NULL) { TOROUT_ERRSTRING("reader_tasks: Out of memory"); kfree(writer_tasks); diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 79b5e45f8d4c..2cc6d1937670 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -324,7 +324,7 @@ static int __test_cycle(struct ww_class *class, unsigned int nthreads) unsigned int n, last = nthreads - 1; int ret; - cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL); + cycles = kmalloc_objs(*cycles, nthreads, GFP_KERNEL); if (!cycles) return -ENOMEM; @@ -412,7 +412,7 @@ static int *get_random_order(int count) int *order; int n, r; - order 
= kmalloc_array(count, sizeof(*order), GFP_KERNEL); + order = kmalloc_objs(*order, count, GFP_KERNEL); if (!order) return order; @@ -506,7 +506,7 @@ static void stress_reorder_work(struct work_struct *work) return; for (n = 0; n < stress->nlocks; n++) { - ll = kmalloc(sizeof(*ll), GFP_KERNEL); + ll = kmalloc_obj(*ll, GFP_KERNEL); if (!ll) goto out; @@ -582,12 +582,11 @@ static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int struct stress *stress_array; int n, count; - locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL); + locks = kmalloc_objs(*locks, nlocks, GFP_KERNEL); if (!locks) return -ENOMEM; - stress_array = kmalloc_array(nthreads, sizeof(*stress_array), - GFP_KERNEL); + stress_array = kmalloc_objs(*stress_array, nthreads, GFP_KERNEL); if (!stress_array) { kfree(locks); return -ENOMEM; diff --git a/kernel/module/dups.c b/kernel/module/dups.c index 0b633f2edda6..bbc72ad93058 100644 --- a/kernel/module/dups.c +++ b/kernel/module/dups.c @@ -125,7 +125,7 @@ bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret) * Pre-allocate the entry in case we have to use it later * to avoid contention with the mutex. 
*/ - new_kmod_req = kzalloc(sizeof(*new_kmod_req), GFP_KERNEL); + new_kmod_req = kzalloc_obj(*new_kmod_req, GFP_KERNEL); if (!new_kmod_req) return false; diff --git a/kernel/module/main.c b/kernel/module/main.c index 710ee30b3bea..b2ac20299915 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -662,7 +662,7 @@ static int add_module_usage(struct module *a, struct module *b) struct module_use *use; pr_debug("Allocating new usage for %s.\n", a->name); - use = kmalloc(sizeof(*use), GFP_ATOMIC); + use = kmalloc_obj(*use, GFP_ATOMIC); if (!use) return -ENOMEM; @@ -3024,7 +3024,7 @@ static noinline int do_init_module(struct module *mod) } #endif - freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); + freeinit = kmalloc_obj(*freeinit, GFP_KERNEL); if (!freeinit) { ret = -ENOMEM; goto fail; diff --git a/kernel/module/stats.c b/kernel/module/stats.c index 3ba0e98b3c91..2fc64f2729e6 100644 --- a/kernel/module/stats.c +++ b/kernel/module/stats.c @@ -250,7 +250,7 @@ int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason) } } - mod_fail = kzalloc(sizeof(*mod_fail), GFP_KERNEL); + mod_fail = kzalloc_obj(*mod_fail, GFP_KERNEL); if (!mod_fail) return -ENOMEM; memcpy(mod_fail->name, name, strlen(name)); diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c index c7622ff5226a..734ea3180478 100644 --- a/kernel/module/sysfs.c +++ b/kernel/module/sysfs.c @@ -74,11 +74,11 @@ static int add_sect_attrs(struct module *mod, const struct load_info *info) for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i])) nloaded++; - sect_attrs = kzalloc(struct_size(sect_attrs, attrs, nloaded), GFP_KERNEL); + sect_attrs = kzalloc_flex(*sect_attrs, attrs, nloaded, GFP_KERNEL); if (!sect_attrs) return -ENOMEM; - gattr = kcalloc(nloaded + 1, sizeof(*gattr), GFP_KERNEL); + gattr = kzalloc_objs(*gattr, nloaded + 1, GFP_KERNEL); if (!gattr) { kfree(sect_attrs); return -ENOMEM; @@ -166,12 +166,11 @@ static int add_notes_attrs(struct module *mod, const 
struct load_info *info) if (notes == 0) return 0; - notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes), - GFP_KERNEL); + notes_attrs = kzalloc_flex(*notes_attrs, attrs, notes, GFP_KERNEL); if (!notes_attrs) return -ENOMEM; - gattr = kcalloc(notes + 1, sizeof(*gattr), GFP_KERNEL); + gattr = kzalloc_objs(*gattr, notes + 1, GFP_KERNEL); if (!gattr) { kfree(notes_attrs); return -ENOMEM; diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c index 4fefec5b683c..41425054a97a 100644 --- a/kernel/module/tracking.c +++ b/kernel/module/tracking.c @@ -33,7 +33,7 @@ int try_add_tainted_module(struct module *mod) } } - mod_taint = kmalloc(sizeof(*mod_taint), GFP_KERNEL); + mod_taint = kmalloc_obj(*mod_taint, GFP_KERNEL); if (unlikely(!mod_taint)) return -ENOMEM; strscpy(mod_taint->name, mod->name, MODULE_NAME_LEN); diff --git a/kernel/padata.c b/kernel/padata.c index db7c75787a2b..f0bf62e9a1f2 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -540,7 +540,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) struct padata_instance *pinst = ps->pinst; struct parallel_data *pd; - pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); + pd = kzalloc_obj(struct parallel_data, GFP_KERNEL); if (!pd) goto err; @@ -952,7 +952,7 @@ struct padata_instance *padata_alloc(const char *name) { struct padata_instance *pinst; - pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); + pinst = kzalloc_obj(struct padata_instance, GFP_KERNEL); if (!pinst) goto err; @@ -1038,7 +1038,7 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst) struct parallel_data *pd; struct padata_shell *ps; - ps = kzalloc(sizeof(*ps), GFP_KERNEL); + ps = kzalloc_obj(*ps, GFP_KERNEL); if (!ps) goto out; @@ -1106,8 +1106,8 @@ void __init padata_init(void) #endif possible_cpus = num_possible_cpus(); - padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work), - GFP_KERNEL); + padata_works = kmalloc_objs(struct padata_work, possible_cpus, + 
GFP_KERNEL); if (!padata_works) goto remove_dead_state; diff --git a/kernel/params.c b/kernel/params.c index 7c2242f64bf0..d26bdfae96e5 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -633,13 +633,13 @@ static __init_or_module int add_sysfs_param(struct module_kobject *mk, if (!mk->mp) { /* First allocation. */ - mk->mp = kzalloc(sizeof(*mk->mp), GFP_KERNEL); + mk->mp = kzalloc_obj(*mk->mp, GFP_KERNEL); if (!mk->mp) return -ENOMEM; mk->mp->grp.name = "parameters"; /* NULL-terminated attribute array. */ - mk->mp->grp.attrs = kzalloc(sizeof(mk->mp->grp.attrs[0]), - GFP_KERNEL); + mk->mp->grp.attrs = kzalloc_obj(mk->mp->grp.attrs[0], + GFP_KERNEL); /* Caller will cleanup via free_module_param_attrs */ if (!mk->mp->grp.attrs) return -ENOMEM; @@ -766,7 +766,7 @@ lookup_or_create_module_kobject(const char *name) if (kobj) return to_module_kobject(kobj); - mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); + mk = kzalloc_obj(struct module_kobject, GFP_KERNEL); if (!mk) return NULL; diff --git a/kernel/power/console.c b/kernel/power/console.c index a906a0ac0f9b..5ed9e1be1560 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -58,7 +58,7 @@ int pm_vt_switch_required(struct device *dev, bool required) } } - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kmalloc_obj(*entry, GFP_KERNEL); if (!entry) { ret = -ENOMEM; goto out; diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 5b055cbe5341..43ddfc11b84a 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -439,7 +439,7 @@ static int em_create_pd(struct device *dev, int nr_states, cpumask_copy(em_span_cpus(pd), cpus); } else { - pd = kzalloc(sizeof(*pd), GFP_KERNEL); + pd = kzalloc_obj(*pd, GFP_KERNEL); if (!pd) return -ENOMEM; } diff --git a/kernel/power/qos.c b/kernel/power/qos.c index f7d8064e9adc..750b80f45b9f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -341,7 +341,7 @@ static int cpu_latency_qos_open(struct inode *inode, 
struct file *filp) { struct pm_qos_request *req; - req = kzalloc(sizeof(*req), GFP_KERNEL); + req = kzalloc_obj(*req, GFP_KERNEL); if (!req) return -ENOMEM; @@ -440,7 +440,7 @@ static int cpu_wakeup_latency_qos_open(struct inode *inode, struct file *filp) { struct pm_qos_request *req; - req = kzalloc(sizeof(*req), GFP_KERNEL); + req = kzalloc_obj(*req, GFP_KERNEL); if (!req) return -ENOMEM; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0a946932d5c1..be0b3304339f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -646,7 +646,7 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) /* New extent is necessary */ struct mem_extent *new_ext; - new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask); + new_ext = kzalloc_obj(struct mem_extent, gfp_mask); if (!new_ext) { free_mem_extents(list); return -ENOMEM; @@ -1124,7 +1124,7 @@ int create_basic_memory_bitmaps(void) else BUG_ON(forbidden_pages_map || free_pages_map); - bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); + bm1 = kzalloc_obj(struct memory_bitmap, GFP_KERNEL); if (!bm1) return -ENOMEM; @@ -1132,7 +1132,7 @@ int create_basic_memory_bitmaps(void) if (error) goto Free_first_object; - bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); + bm2 = kzalloc_obj(struct memory_bitmap, GFP_KERNEL); if (!bm2) goto Free_first_bitmap; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index c4eb284b8e72..9bc1241259d3 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -155,7 +155,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) } } /* Add the new node and rebalance the tree. 
*/ - ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); + ext = kzalloc_obj(struct swsusp_extent, GFP_KERNEL); if (!ext) return -ENOMEM; @@ -577,7 +577,7 @@ static struct crc_data *alloc_crc_data(int nr_threads) { struct crc_data *crc; - crc = kzalloc(sizeof(*crc), GFP_KERNEL); + crc = kzalloc_obj(*crc, GFP_KERNEL); if (!crc) return NULL; @@ -585,7 +585,7 @@ static struct crc_data *alloc_crc_data(int nr_threads) if (!crc->unc) goto err_free_crc; - crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL); + crc->unc_len = kzalloc_objs(*crc->unc_len, nr_threads, GFP_KERNEL); if (!crc->unc_len) goto err_free_unc; @@ -1016,7 +1016,7 @@ static int get_swap_reader(struct swap_map_handle *handle, last = handle->maps = NULL; offset = swsusp_header->image; while (offset) { - tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL); + tmp = kzalloc_obj(*handle->maps, GFP_KERNEL); if (!tmp) { release_swap_reader(handle); return -ENOMEM; diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 4e941999a53b..49712d9e7cfa 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -178,7 +178,7 @@ static struct wakelock *wakelock_lookup_add(const char *name, size_t len, return ERR_PTR(-ENOSPC); /* Not found, we have to add a new one. 
*/ - wl = kzalloc(sizeof(*wl), GFP_KERNEL); + wl = kzalloc_obj(*wl, GFP_KERNEL); if (!wl) return ERR_PTR(-ENOMEM); diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c index d558b18505cd..c98241238f2a 100644 --- a/kernel/printk/nbcon.c +++ b/kernel/printk/nbcon.c @@ -1801,7 +1801,7 @@ bool nbcon_alloc(struct console *con) */ con->pbufs = &printk_shared_pbufs; } else { - con->pbufs = kmalloc(sizeof(*con->pbufs), GFP_KERNEL); + con->pbufs = kmalloc_obj(*con->pbufs, GFP_KERNEL); if (!con->pbufs) { con_printk(KERN_ERR, con, "failed to allocate printing buffer\n"); return false; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a181394604d1..599d56300ded 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -933,7 +933,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) return err; } - user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); + user = kvmalloc_obj(struct devkmsg_user, GFP_KERNEL); if (!user) return -ENOMEM; diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 1c50f89fbd6f..5512686be5d0 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -755,7 +755,8 @@ kfree_scale_thread(void *arg) } for (i = 0; i < kfree_alloc_num; i++) { - alloc_ptr = kcalloc(kfree_mult, sizeof(struct kfree_obj), GFP_KERNEL); + alloc_ptr = kzalloc_objs(struct kfree_obj, kfree_mult, + GFP_KERNEL); if (!alloc_ptr) return -ENOMEM; @@ -908,8 +909,8 @@ kfree_scale_init(void) kfree_mult * sizeof(struct kfree_obj), kfree_by_call_rcu); - kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]), - GFP_KERNEL); + kfree_reader_tasks = kzalloc_objs(kfree_reader_tasks[0], + kfree_nrealthreads, GFP_KERNEL); if (kfree_reader_tasks == NULL) { firsterr = -ENOMEM; goto unwind; @@ -1129,8 +1130,7 @@ rcu_scale_init(void) goto unwind; schedule_timeout_uninterruptible(1); } - reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), - GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], 
nrealreaders, GFP_KERNEL); if (reader_tasks == NULL) { SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -1144,10 +1144,11 @@ rcu_scale_init(void) } while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders) schedule_timeout_uninterruptible(1); - writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL); + writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters, GFP_KERNEL); writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL); - writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL); - writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL); + writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters, + GFP_KERNEL); + writer_done = kzalloc_objs(writer_done[0], nrealwriters, GFP_KERNEL); if (gp_async) { if (gp_async_max <= 0) { pr_warn("%s: gp_async_max = %d must be greater than zero.\n", @@ -1156,7 +1157,8 @@ rcu_scale_init(void) firsterr = -EINVAL; goto unwind; } - writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL); + writer_freelists = kzalloc_objs(writer_freelists[0], + nrealwriters, GFP_KERNEL); } if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done || (gp_async && !writer_freelists)) { @@ -1177,8 +1179,9 @@ rcu_scale_init(void) init_llist_head(&wflp->ws_lhg); init_llist_head(&wflp->ws_lhp); - wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]), - GFP_KERNEL); + wflp->ws_mblocks = kzalloc_objs(wflp->ws_mblocks[0], + gp_async_max, + GFP_KERNEL); if (!wflp->ws_mblocks) { firsterr = -ENOMEM; goto unwind; diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 47ce7f49b52c..d2e673771295 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1626,7 +1626,7 @@ rcu_torture_writer(void *arg) ulo_size = cur_ops->poll_active; } if (cur_ops->poll_active_full > 0) { - rgo = kcalloc(cur_ops->poll_active_full, sizeof(*rgo), GFP_KERNEL); + rgo = 
kzalloc_objs(*rgo, cur_ops->poll_active_full, GFP_KERNEL); if (!WARN_ON(!rgo)) rgo_size = cur_ops->poll_active_full; } @@ -2462,7 +2462,7 @@ static void rcu_torture_timer(struct timer_list *unused) /* Test call_rcu() invocation from interrupt handler. */ if (cur_ops->call) { - struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); + struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_NOWAIT); if (rhp) cur_ops->call(rhp, rcu_torture_timer_cb); @@ -2558,7 +2558,7 @@ static int rcu_torture_updown_init(void) VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives"); return 0; } - updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL); + updownreaders = kzalloc_objs(*updownreaders, n_up_down, GFP_KERNEL); if (!updownreaders) { VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests"); return -ENOMEM; @@ -2891,7 +2891,7 @@ static void rcu_torture_mem_dump_obj(void) mem_dump_obj(&z); kmem_cache_free(kcp, rhp); kmem_cache_destroy(kcp); - rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); + rhp = kmalloc_obj(*rhp, GFP_KERNEL); if (WARN_ON_ONCE(!rhp)) return; pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); @@ -3399,7 +3399,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) n_launders++; n_launders_sa++; } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { - rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); + rfcp = kmalloc_obj(*rfcp, GFP_KERNEL); if (WARN_ON_ONCE(!rfcp)) { schedule_timeout_interruptible(1); continue; @@ -3587,8 +3587,8 @@ static int __init rcu_torture_fwd_prog_init(void) fwd_progress_holdoff = 1; if (fwd_progress_div <= 0) fwd_progress_div = 4; - rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); - fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); + rfp = kzalloc_objs(*rfp, fwd_progress, GFP_KERNEL); + fwd_prog_tasks = kzalloc_objs(*fwd_prog_tasks, 
fwd_progress, GFP_KERNEL); if (!rfp || !fwd_prog_tasks) { kfree(rfp); kfree(fwd_prog_tasks); @@ -3754,10 +3754,9 @@ static int rcu_torture_barrier_init(void) atomic_set(&barrier_cbs_count, 0); atomic_set(&barrier_cbs_invoked, 0); barrier_cbs_tasks = - kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), - GFP_KERNEL); + kzalloc_objs(barrier_cbs_tasks[0], n_barrier_cbs, GFP_KERNEL); barrier_cbs_wq = - kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); + kzalloc_objs(barrier_cbs_wq[0], n_barrier_cbs, GFP_KERNEL); if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) return -ENOMEM; for (i = 0; i < n_barrier_cbs; i++) { @@ -4224,7 +4223,7 @@ static void rcu_test_debug_objects(void) (!cur_ops->call || !cur_ops->cb_barrier))) return; - struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); + struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_KERNEL); init_rcu_head_on_stack(&rh1); init_rcu_head_on_stack(&rh2); @@ -4549,9 +4548,8 @@ rcu_torture_init(void) rcu_torture_write_types(); if (nrealfakewriters > 0) { - fakewriter_tasks = kcalloc(nrealfakewriters, - sizeof(fakewriter_tasks[0]), - GFP_KERNEL); + fakewriter_tasks = kzalloc_objs(fakewriter_tasks[0], + nrealfakewriters, GFP_KERNEL); if (fakewriter_tasks == NULL) { TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -4564,10 +4562,9 @@ rcu_torture_init(void) if (torture_init_error(firsterr)) goto unwind; } - reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), - GFP_KERNEL); - rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), - GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders, GFP_KERNEL); + rcu_torture_reader_mbchk = kzalloc_objs(*rcu_torture_reader_mbchk, + nrealreaders, GFP_KERNEL); if (!reader_tasks || !rcu_torture_reader_mbchk) { TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -4595,7 +4592,8 @@ rcu_torture_init(void) if (WARN_ON(nocbs_toggle < 0)) nocbs_toggle = HZ; if (nrealnocbers > 0) { - nocb_tasks = kcalloc(nrealnocbers, 
sizeof(nocb_tasks[0]), GFP_KERNEL); + nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers, + GFP_KERNEL); if (nocb_tasks == NULL) { TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 07a313782dfd..39d679a4c17e 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -1143,7 +1143,7 @@ static bool typesafe_init(void) else if (si == 0) si = nr_cpu_ids; rtsarray_size = si; - rtsarray = kcalloc(si, sizeof(*rtsarray), GFP_KERNEL); + rtsarray = kzalloc_objs(*rtsarray, si, GFP_KERNEL); if (!rtsarray) return false; for (idx = 0; idx < rtsarray_size; idx++) { @@ -1575,8 +1575,7 @@ ref_scale_init(void) "%s: nreaders * loops will overflow, adjusted loops to %d", __func__, INT_MAX / nreaders)) loops = INT_MAX / nreaders; - reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]), - GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], nreaders, GFP_KERNEL); if (!reader_tasks) { SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 66ba6a2f83d3..0faf35f393a3 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -173,7 +173,8 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags) /* Initialize geometry if it has not already been initialized. 
*/ rcu_init_geometry(); - ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags); + ssp->srcu_sup->node = kzalloc_objs(*ssp->srcu_sup->node, rcu_num_nodes, + gfp_flags); if (!ssp->srcu_sup->node) return false; @@ -237,7 +238,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags) static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) { if (!is_static) - ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL); + ssp->srcu_sup = kzalloc_obj(*ssp->srcu_sup, GFP_KERNEL); if (!ssp->srcu_sup) return -ENOMEM; if (!is_static) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 76f952196a29..d9ccf18eb035 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -259,7 +259,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp) } lim = rcu_task_enqueue_lim; - rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL); + rtp->rtpcp_array = kzalloc_objs(struct rcu_tasks_percpu *, + num_possible_cpus(), GFP_KERNEL); BUG_ON(!rtp->rtpcp_array); for_each_possible_cpu(cpu) { diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index dfeba9b35395..14150f09fd61 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -614,7 +614,7 @@ static void early_boot_test_call_rcu(void) call_rcu(&head, test_callback); early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu); call_srcu(&early_srcu, &shead, test_callback); - rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); + rhp = kmalloc_obj(*rhp, GFP_KERNEL); if (!WARN_ON_ONCE(!rhp)) kfree_rcu(rhp, rh); } diff --git a/kernel/reboot.c b/kernel/reboot.c index ec087827c85c..695c33e75efd 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -374,7 +374,7 @@ static struct sys_off_handler *alloc_sys_off_handler(int priority) else flags = GFP_KERNEL; - handler = kzalloc(sizeof(*handler), flags); + handler = kzalloc_obj(*handler, flags); if (!handler) return ERR_PTR(-ENOMEM); } diff --git a/kernel/relay.c 
b/kernel/relay.c index 5c665b729132..c28fc5dd3ded 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -59,7 +59,7 @@ static const struct vm_operations_struct relay_file_mmap_ops = { */ static struct page **relay_alloc_page_array(unsigned int n_pages) { - return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); + return kvzalloc_objs(struct page *, n_pages, GFP_KERNEL); } /* @@ -150,11 +150,10 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t)) return NULL; - buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); + buf = kzalloc_obj(struct rchan_buf, GFP_KERNEL); if (!buf) return NULL; - buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t), - GFP_KERNEL); + buf->padding = kmalloc_objs(size_t, chan->n_subbufs, GFP_KERNEL); if (!buf->padding) goto free_buf; @@ -490,7 +489,7 @@ struct rchan *relay_open(const char *base_filename, if (!cb || !cb->create_buf_file || !cb->remove_buf_file) return NULL; - chan = kzalloc(sizeof(struct rchan), GFP_KERNEL); + chan = kzalloc_obj(struct rchan, GFP_KERNEL); if (!chan) return NULL; diff --git a/kernel/resource.c b/kernel/resource.c index 31341bdd7707..d591e76c1535 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -182,7 +182,7 @@ static void free_resource(struct resource *res) static struct resource *alloc_resource(gfp_t flags) { - return kzalloc(sizeof(struct resource), flags); + return kzalloc_obj(struct resource, flags); } /* Return the conflict entry if you can't request it */ @@ -502,7 +502,7 @@ int walk_system_ram_res_rev(u64 start, u64 end, void *arg, int ret = -1; /* create a list */ - rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL); + rams = kvzalloc_objs(struct resource, rams_size, GFP_KERNEL); if (!rams) return ret; diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c index b8ef75b99eb2..378218df2427 100644 --- a/kernel/resource_kunit.c +++ b/kernel/resource_kunit.c @@ -204,7 +204,7 @@ static void 
resource_test_insert_resource(struct kunit *test, struct resource *p { struct resource *res; - res = kzalloc(sizeof(*res), GFP_KERNEL); + res = kzalloc_obj(*res, GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, res); res->name = name; diff --git a/kernel/scftorture.c b/kernel/scftorture.c index d86d2d9c4624..02b3a5d2f0aa 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -350,7 +350,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra struct scf_selector *scfsp = scf_sel_rand(trsp); if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) { - scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC); + scfcp = kmalloc_obj(*scfcp, GFP_ATOMIC); if (!scfcp) { WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN)); atomic_inc(&n_alloc_errs); @@ -661,7 +661,7 @@ static int __init scf_torture_init(void) // Worker tasks invoking smp_call_function(). if (nthreads < 0) nthreads = num_online_cpus(); - scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL); + scf_stats_p = kzalloc_objs(scf_stats_p[0], nthreads, GFP_KERNEL); if (!scf_stats_p) { SCFTORTOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 954137775f38..c5a1019cbe83 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -86,7 +86,7 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p) static inline struct autogroup *autogroup_create(void) { - struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL); + struct autogroup *ag = kzalloc_obj(*ag, GFP_KERNEL); struct task_group *tg; if (!ag) diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index 9ede71ecba7f..6065cf725eee 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -12,7 +12,7 @@ struct sched_core_cookie { static unsigned long sched_core_alloc_cookie(void) { - struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL); + struct sched_core_cookie *ck = kmalloc_obj(*ck, GFP_KERNEL); if 
(!ck) return 0; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 23a56ba12d81..6e9a2e067886 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -61,7 +61,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!parent_css) return &root_cpuacct.css; - ca = kzalloc(sizeof(*ca), GFP_KERNEL); + ca = kzalloc_obj(*ca, GFP_KERNEL); if (!ca) goto out; diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 37b572cc8aca..bbb2d68df86a 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -252,9 +252,7 @@ int cpudl_init(struct cpudl *cp) raw_spin_lock_init(&cp->lock); cp->size = 0; - cp->elements = kcalloc(nr_cpu_ids, - sizeof(struct cpudl_item), - GFP_KERNEL); + cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids, GFP_KERNEL); if (!cp->elements) return -ENOMEM; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index cfc40181f66e..d71d09ed1b3b 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -638,7 +638,7 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) { struct sugov_policy *sg_policy; - sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL); + sg_policy = kzalloc_obj(*sg_policy, GFP_KERNEL); if (!sg_policy) return NULL; @@ -722,7 +722,7 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic { struct sugov_tunables *tunables; - tunables = kzalloc(sizeof(*tunables), GFP_KERNEL); + tunables = kzalloc_obj(*tunables, GFP_KERNEL); if (tunables) { gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook); if (!have_governor_per_policy()) diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 76a9ac5eb794..c2642deeaabc 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -288,7 +288,7 @@ int cpupri_init(struct cpupri *cp) goto cleanup; } - cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); + cp->cpu_to_pri = 
kzalloc_objs(int, nr_cpu_ids, GFP_KERNEL); if (!cp->cpu_to_pri) goto cleanup; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index c18e81e8ef51..b9fadb2583ea 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4223,11 +4223,11 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) { struct scx_exit_info *ei; - ei = kzalloc(sizeof(*ei), GFP_KERNEL); + ei = kzalloc_obj(*ei, GFP_KERNEL); if (!ei) return NULL; - ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL); + ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN, GFP_KERNEL); ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); @@ -4824,7 +4824,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) struct scx_sched *sch; int node, ret; - sch = kzalloc(sizeof(*sch), GFP_KERNEL); + sch = kzalloc_obj(*sch, GFP_KERNEL); if (!sch) return ERR_PTR(-ENOMEM); @@ -4838,8 +4838,8 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) if (ret < 0) goto err_free_ei; - sch->global_dsqs = kcalloc(nr_node_ids, sizeof(sch->global_dsqs[0]), - GFP_KERNEL); + sch->global_dsqs = kzalloc_objs(sch->global_dsqs[0], nr_node_ids, + GFP_KERNEL); if (!sch->global_dsqs) { ret = -ENOMEM; goto err_free_hash; diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c index 3d9d404d5cd2..cd630772e164 100644 --- a/kernel/sched/ext_idle.c +++ b/kernel/sched/ext_idle.c @@ -664,8 +664,8 @@ void scx_idle_init_masks(void) BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL)); /* Allocate per-node idle cpumasks */ - scx_idle_node_masks = kcalloc(num_possible_nodes(), - sizeof(*scx_idle_node_masks), GFP_KERNEL); + scx_idle_node_masks = kzalloc_objs(*scx_idle_node_masks, + num_possible_nodes(), GFP_KERNEL); BUG_ON(!scx_idle_node_masks); for_each_node(i) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1e22b7fadd70..f6f050f2faec 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ 
-3427,7 +3427,7 @@ retry_pids: if (!vma->numab_state) { struct vma_numab_state *ptr; - ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); + ptr = kzalloc_obj(*ptr, GFP_KERNEL); if (!ptr) continue; @@ -13622,10 +13622,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) struct cfs_rq *cfs_rq; int i; - tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); + tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids, GFP_KERNEL); if (!tg->cfs_rq) goto err; - tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); + tg->se = kzalloc_objs(se, nr_cpu_ids, GFP_KERNEL); if (!tg->se) goto err; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 59fdb7ebbf22..bf8a70598a09 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1114,7 +1114,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup) if (!static_branch_likely(&psi_cgroups_enabled)) return 0; - cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL); + cgroup->psi = kzalloc_obj(struct psi_group, GFP_KERNEL); if (!cgroup->psi) return -ENOMEM; @@ -1340,7 +1340,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, if (threshold_us == 0 || threshold_us > window_us) return ERR_PTR(-EINVAL); - t = kmalloc(sizeof(*t), GFP_KERNEL); + t = kmalloc_obj(*t, GFP_KERNEL); if (!t) return ERR_PTR(-ENOMEM); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a7680477fa6f..e72df7045592 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -259,10 +259,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) if (!rt_group_sched_enabled()) return 1; - tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); + tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids, GFP_KERNEL); if (!tg->rt_rq) goto err; - tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL); + tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids, GFP_KERNEL); if (!tg->rt_se) goto err; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index ac268da91778..ac54fcae5de7 100644 --- 
a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -350,7 +350,7 @@ static struct perf_domain *pd_init(int cpu) return NULL; } - pd = kzalloc(sizeof(*pd), GFP_KERNEL); + pd = kzalloc_obj(*pd, GFP_KERNEL); if (!pd) return NULL; pd->em_pd = obj; @@ -589,7 +589,7 @@ static struct root_domain *alloc_rootdomain(void) { struct root_domain *rd; - rd = kzalloc(sizeof(*rd), GFP_KERNEL); + rd = kzalloc_obj(*rd, GFP_KERNEL); if (!rd) return NULL; @@ -1998,7 +1998,7 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), */ nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); - distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL); + distances = kzalloc_objs(int, nr_levels, GFP_KERNEL); if (!distances) return -ENOMEM; @@ -2734,7 +2734,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms) int i; cpumask_var_t *doms; - doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL); + doms = kmalloc_objs(*doms, ndoms, GFP_KERNEL); if (!doms) return NULL; for (i = 0; i < ndoms; i++) { diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 25f62867a16d..b2297243071d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -693,7 +693,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) return ERR_PTR(-EACCES); /* Allocate a new seccomp_filter */ - sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN); + sfilter = kzalloc_obj(*sfilter, GFP_KERNEL | __GFP_NOWARN); if (!sfilter) return ERR_PTR(-ENOMEM); @@ -1893,7 +1893,7 @@ static struct file *init_listener(struct seccomp_filter *filter) struct file *ret; ret = ERR_PTR(-ENOMEM); - filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL); + filter->notif = kzalloc_obj(*(filter->notif), GFP_KERNEL); if (!filter->notif) goto out; diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index 269683d41aa9..864ae2da708f 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -255,7 +255,7 @@ static int 
__static_call_init(struct module *mod, goto do_transform; } - site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); + site_mod = kzalloc_obj(*site_mod, GFP_KERNEL); if (!site_mod) return -ENOMEM; @@ -271,7 +271,7 @@ static int __static_call_init(struct module *mod, key->mods = site_mod; - site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); + site_mod = kzalloc_obj(*site_mod, GFP_KERNEL); if (!site_mod) return -ENOMEM; } diff --git a/kernel/time/namespace.c b/kernel/time/namespace.c index e76be24b132c..652744e00eb4 100644 --- a/kernel/time/namespace.c +++ b/kernel/time/namespace.c @@ -89,7 +89,7 @@ static struct time_namespace *clone_time_ns(struct user_namespace *user_ns, goto fail; err = -ENOMEM; - ns = kzalloc(sizeof(*ns), GFP_KERNEL_ACCOUNT); + ns = kzalloc_obj(*ns, GFP_KERNEL_ACCOUNT); if (!ns) goto fail_dec; diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index 101a0f7c43e0..3a67e7e4c875 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -103,7 +103,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) err = -ENODEV; goto out; } - pccontext = kzalloc(sizeof(*pccontext), GFP_KERNEL); + pccontext = kzalloc_obj(*pccontext, GFP_KERNEL); if (!pccontext) { err = -ENOMEM; goto out; diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 6da9cd562b20..21e72318aeb8 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1766,7 +1766,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node, int i, top = 0, err = 0, start_lvl = 0; bool root_mismatch = false; - stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL); + stack = kzalloc_objs(*stack, tmigr_hierarchy_levels, GFP_KERNEL); if (!stack) return -ENOMEM; @@ -2001,7 +2001,8 @@ static int __init tmigr_init(void) */ tmigr_crossnode_level = cpulvl; - tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL); + tmigr_level_list = kzalloc_objs(struct 
list_head, + tmigr_hierarchy_levels, GFP_KERNEL); if (!tmigr_level_list) goto err; diff --git a/kernel/torture.c b/kernel/torture.c index 1ea9f67953a7..27c9bb6122d8 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -494,7 +494,7 @@ void torture_shuffle_task_register(struct task_struct *tp) if (WARN_ON_ONCE(tp == NULL)) return; - stp = kmalloc(sizeof(*stp), GFP_KERNEL); + stp = kmalloc_obj(*stp, GFP_KERNEL); if (WARN_ON_ONCE(stp == NULL)) return; stp->st_t = tp; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e6988929ead2..5526b141b433 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -671,7 +671,7 @@ static struct blk_trace *blk_trace_setup_prepare(struct request_queue *q, return ERR_PTR(-EBUSY); } - bt = kzalloc(sizeof(*bt), GFP_KERNEL); + bt = kzalloc_obj(*bt, GFP_KERNEL); if (!bt) return ERR_PTR(-ENOMEM); @@ -1904,7 +1904,7 @@ static int blk_trace_setup_queue(struct request_queue *q, struct blk_trace *bt = NULL; int ret = -ENOMEM; - bt = kzalloc(sizeof(*bt), GFP_KERNEL); + bt = kzalloc_obj(*bt, GFP_KERNEL); if (!bt) return -ENOMEM; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index eadaef8592a3..c09268c6e9b7 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2243,7 +2243,7 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op, switch (op) { case MODULE_STATE_COMING: - btm = kzalloc(sizeof(*btm), GFP_KERNEL); + btm = kzalloc_obj(*btm, GFP_KERNEL); if (btm) { btm->module = module; list_add(&btm->list, &bpf_trace_modules); @@ -2819,7 +2819,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr goto error; } - link = kzalloc(sizeof(*link), GFP_KERNEL); + link = kzalloc_obj(*link, GFP_KERNEL); if (!link) { err = -ENOMEM; goto error; @@ -3238,8 +3238,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr err = -ENOMEM; - link = kzalloc(sizeof(*link), GFP_KERNEL); - uprobes = kvcalloc(cnt, 
sizeof(*uprobes), GFP_KERNEL); + link = kzalloc_obj(*link, GFP_KERNEL); + uprobes = kvzalloc_objs(*uprobes, cnt, GFP_KERNEL); if (!uprobes || !link) goto error_free; diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 1188eefef07c..0d649ca71ce0 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -749,7 +749,7 @@ static int fprobe_init(struct fprobe *fp, unsigned long *addrs, int num) return -E2BIG; fp->entry_data_size = size; - hlist_array = kzalloc(struct_size(hlist_array, array, num), GFP_KERNEL); + hlist_array = kzalloc_flex(*hlist_array, array, num, GFP_KERNEL); if (!hlist_array) return -ENOMEM; @@ -805,7 +805,7 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter if (!addrs) return -ENOMEM; - mods = kcalloc(num, sizeof(*mods), GFP_KERNEL); + mods = kzalloc_objs(*mods, num, GFP_KERNEL); if (!mods) return -ENOMEM; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1ce17c8af409..fb3915a67013 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -702,7 +702,7 @@ static int ftrace_profile_init_cpu(int cpu) */ size = FTRACE_PROFILE_HASH_SIZE; - stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); + stat->hash = kzalloc_objs(struct hlist_head, size, GFP_KERNEL); if (!stat->hash) return -ENOMEM; @@ -1215,7 +1215,7 @@ add_ftrace_hash_entry_direct(struct ftrace_hash *hash, unsigned long ip, unsigne { struct ftrace_func_entry *entry; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kmalloc_obj(*entry, GFP_KERNEL); if (!entry) return NULL; @@ -1335,12 +1335,12 @@ struct ftrace_hash *alloc_ftrace_hash(int size_bits) struct ftrace_hash *hash; int size; - hash = kzalloc(sizeof(*hash), GFP_KERNEL); + hash = kzalloc_obj(*hash, GFP_KERNEL); if (!hash) return NULL; size = 1 << size_bits; - hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); + hash->buckets = kzalloc_objs(*hash->buckets, size, GFP_KERNEL); if (!hash->buckets) { kfree(hash); @@ -1360,7 +1360,7 
@@ static int ftrace_add_mod(struct trace_array *tr, struct ftrace_mod_load *ftrace_mod; struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; - ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); + ftrace_mod = kzalloc_obj(*ftrace_mod, GFP_KERNEL); if (!ftrace_mod) return -ENOMEM; @@ -3911,7 +3911,7 @@ ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages) if (!num_to_init) return NULL; - start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); + start_pg = pg = kzalloc_obj(*pg, GFP_KERNEL); if (!pg) return NULL; @@ -3929,7 +3929,7 @@ ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages) if (!num_to_init) break; - pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); + pg->next = kzalloc_obj(*pg, GFP_KERNEL); if (!pg->next) goto free_pages; @@ -4686,7 +4686,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, if (tracing_check_open_get_tr(tr)) return -ENODEV; - iter = kzalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc_obj(*iter, GFP_KERNEL); if (!iter) goto out; @@ -5334,7 +5334,7 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, if (entry) return -EBUSY; - map = kmalloc(sizeof(*map), GFP_KERNEL); + map = kmalloc_obj(*map, GFP_KERNEL); if (!map) return -ENOMEM; @@ -5474,7 +5474,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, } } if (!probe) { - probe = kzalloc(sizeof(*probe), GFP_KERNEL); + probe = kzalloc_obj(*probe, GFP_KERNEL); if (!probe) { mutex_unlock(&ftrace_lock); return -ENOMEM; @@ -7223,7 +7223,7 @@ ftrace_graph_open(struct inode *inode, struct file *file) if (unlikely(ftrace_disabled)) return -ENODEV; - fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); + fgd = kmalloc_obj(*fgd, GFP_KERNEL); if (fgd == NULL) return -ENOMEM; @@ -7251,7 +7251,7 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file) if (unlikely(ftrace_disabled)) return -ENODEV; - fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); + fgd = kmalloc_obj(*fgd, GFP_KERNEL); if (fgd == NULL) return -ENOMEM; 
@@ -8041,7 +8041,7 @@ static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, if (!ret) return; - mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); + mod_func = kmalloc_obj(*mod_func, GFP_KERNEL); if (!mod_func) return; @@ -8068,7 +8068,7 @@ allocate_ftrace_mod_map(struct module *mod, if (ftrace_disabled) return NULL; - mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); + mod_map = kmalloc_obj(*mod_map, GFP_KERNEL); if (!mod_map) return NULL; @@ -8241,7 +8241,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list, { struct ftrace_init_func *func; - func = kmalloc(sizeof(*func), GFP_KERNEL); + func = kmalloc_obj(*func, GFP_KERNEL); if (!func) { MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); return; diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c index dbee72d69d0a..6d12855b0277 100644 --- a/kernel/trace/pid_list.c +++ b/kernel/trace/pid_list.c @@ -359,7 +359,7 @@ static void pid_list_refill_irq(struct irq_work *iwork) while (upper_count-- > 0) { union upper_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + chunk = kzalloc_obj(*chunk, GFP_NOWAIT); if (!chunk) break; *upper_next = chunk; @@ -370,7 +370,7 @@ static void pid_list_refill_irq(struct irq_work *iwork) while (lower_count-- > 0) { union lower_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + chunk = kzalloc_obj(*chunk, GFP_NOWAIT); if (!chunk) break; *lower_next = chunk; @@ -423,7 +423,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) /* According to linux/thread.h, pids can be no bigger that 30 bits */ WARN_ON_ONCE(init_pid_ns.pid_max > (1 << 30)); - pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL); + pid_list = kzalloc_obj(*pid_list, GFP_KERNEL); if (!pid_list) return NULL; @@ -435,7 +435,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) for (i = 0; i < CHUNK_ALLOC; i++) { union upper_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc_obj(*chunk, GFP_KERNEL); if (!chunk) break; chunk->next = 
pid_list->upper_list; @@ -446,7 +446,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) for (i = 0; i < CHUNK_ALLOC; i++) { union lower_chunk *chunk; - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc_obj(*chunk, GFP_KERNEL); if (!chunk) break; chunk->next = pid_list->lower_list; diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c index 30d224946881..d09d5a204627 100644 --- a/kernel/trace/rethook.c +++ b/kernel/trace/rethook.c @@ -108,7 +108,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler, if (!handler || num <= 0 || size < sizeof(struct rethook_node)) return ERR_PTR(-EINVAL); - rh = kzalloc(sizeof(struct rethook), GFP_KERNEL); + rh = kzalloc_obj(struct rethook, GFP_KERNEL); if (!rh) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1e7a34a31851..e1395834886e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -6001,7 +6001,7 @@ ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags) if (!cpumask_test_cpu(cpu, buffer->cpumask)) return NULL; - iter = kzalloc(sizeof(*iter), flags); + iter = kzalloc_obj(*iter, flags); if (!iter) return NULL; @@ -6509,7 +6509,7 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) if (!cpumask_test_cpu(cpu, buffer->cpumask)) return ERR_PTR(-ENODEV); - bpage = kzalloc(sizeof(*bpage), GFP_KERNEL); + bpage = kzalloc_obj(*bpage, GFP_KERNEL); if (!bpage) return ERR_PTR(-ENOMEM); @@ -7190,7 +7190,7 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, nr_pages = nr_vma_pages; - pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); + pages = kzalloc_objs(*pages, nr_pages, GFP_KERNEL); if (!pages) return -ENOMEM; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2f6fbf9e7caf..83ae2e8e931c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1064,7 +1064,7 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t 
update) { struct cond_snapshot *cond_snapshot __free(kfree) = - kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); + kzalloc_obj(*cond_snapshot, GFP_KERNEL); int ret; if (!cond_snapshot) @@ -3903,8 +3903,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot) if (!iter) return ERR_PTR(-ENOMEM); - iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), - GFP_KERNEL); + iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids, + GFP_KERNEL); if (!iter->buffer_iter) goto release; @@ -5132,7 +5132,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. */ - map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); + map_array = kmalloc_objs(*map_array, len + 2, GFP_KERNEL); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; @@ -5809,7 +5809,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) goto fail_pipe_on_cpu; /* create a buffer to store the information to pass to userspace */ - iter = kzalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc_obj(*iter, GFP_KERNEL); if (!iter) { ret = -ENOMEM; goto fail_alloc_iter; @@ -6628,7 +6628,7 @@ static int user_buffer_init(struct trace_user_buf_info **tinfo, size_t size) if (!*tinfo) { alloc = true; - *tinfo = kzalloc(sizeof(**tinfo), GFP_KERNEL); + *tinfo = kzalloc_obj(**tinfo, GFP_KERNEL); if (!*tinfo) return -ENOMEM; } @@ -7153,10 +7153,10 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; - m = kzalloc(sizeof(*m), GFP_KERNEL); + m = kzalloc_obj(*m, GFP_KERNEL); if (!m) goto out; - iter = kzalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc_obj(*iter, GFP_KERNEL); if (!iter) { kfree(m); goto out; @@ -7545,7 +7545,7 @@ static struct tracing_log_err *alloc_tracing_log_err(int len) { struct 
tracing_log_err *err; - err = kzalloc(sizeof(*err), GFP_KERNEL); + err = kzalloc_obj(*err, GFP_KERNEL); if (!err) return ERR_PTR(-ENOMEM); @@ -7804,7 +7804,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) if (ret) return ret; - info = kvzalloc(sizeof(*info), GFP_KERNEL); + info = kvzalloc_obj(*info, GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; @@ -8065,7 +8065,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct page *page; int r; - ref = kzalloc(sizeof(*ref), GFP_KERNEL); + ref = kzalloc_obj(*ref, GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; @@ -8284,7 +8284,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, unsigned long long t; unsigned long usec_rem; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return -ENOMEM; @@ -8878,7 +8878,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer, for (cnt = 0; opts[cnt].name; cnt++) ; - topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); + topts = kzalloc_objs(*topts, cnt + 1, GFP_KERNEL); if (!topts) return 0; @@ -8950,7 +8950,7 @@ static int add_tracer(struct trace_array *tr, struct tracer *tracer) if (!trace_ok_for_array(tracer, tr)) return 0; - t = kmalloc(sizeof(*t), GFP_KERNEL); + t = kmalloc_obj(*t, GFP_KERNEL); if (!t) return -ENOMEM; @@ -8967,7 +8967,7 @@ static int add_tracer(struct trace_array *tr, struct tracer *tracer) * If the tracer defines default flags, it means the flags are * per trace instance. 
*/ - flags = kmalloc(sizeof(*flags), GFP_KERNEL); + flags = kmalloc_obj(*flags, GFP_KERNEL); if (!flags) return -ENOMEM; @@ -9310,7 +9310,8 @@ static void setup_trace_scratch(struct trace_array *tr, mod_addr_comp, NULL, NULL); if (IS_ENABLED(CONFIG_MODULES)) { - module_delta = kzalloc(struct_size(module_delta, delta, nr_entries), GFP_KERNEL); + module_delta = kzalloc_flex(*module_delta, delta, nr_entries, + GFP_KERNEL); if (!module_delta) { pr_info("module_delta allocation failed. Not able to decode module address."); goto reset; @@ -9537,7 +9538,7 @@ trace_array_create_systems(const char *name, const char *systems, int ret; ret = -ENOMEM; - tr = kzalloc(sizeof(*tr), GFP_KERNEL); + tr = kzalloc_obj(*tr, GFP_KERNEL); if (!tr) return ERR_PTR(ret); @@ -10928,8 +10929,8 @@ void __init ftrace_boot_snapshot(void) void __init early_trace_init(void) { if (tracepoint_printk) { - tracepoint_print_iter = - kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); + tracepoint_print_iter = kzalloc_obj(*tracepoint_print_iter, + GFP_KERNEL); if (MEM_FAIL(!tracepoint_print_iter, "Failed to allocate trace iterator\n")) tracepoint_printk = 0; diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c index 5bbdbcbbde3c..1d3c42527736 100644 --- a/kernel/trace/trace_btf.c +++ b/kernel/trace/trace_btf.c @@ -78,7 +78,7 @@ const struct btf_member *btf_find_struct_member(struct btf *btf, const char *name; int i, top = 0; - anon_stack = kcalloc(BTF_ANON_STACK_MAX, sizeof(*anon_stack), GFP_KERNEL); + anon_stack = kzalloc_objs(*anon_stack, BTF_ANON_STACK_MAX, GFP_KERNEL); if (!anon_stack) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index 3ee39715d5e4..3adc9a8c29a9 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -211,7 +211,7 @@ static struct trace_eprobe *alloc_event_probe(const char *group, sys_name = event->class->system; event_name = trace_event_name(event); - ep = kzalloc(struct_size(ep, tp.args, nargs), 
GFP_KERNEL); + ep = kzalloc_flex(*ep, tp.args, nargs, GFP_KERNEL); if (!ep) { trace_event_put_ref(event); return ERR_PTR(-ENOMEM); @@ -529,8 +529,8 @@ new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file) struct eprobe_data *edata; int ret; - edata = kzalloc(sizeof(*edata), GFP_KERNEL); - trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + edata = kzalloc_obj(*edata, GFP_KERNEL); + trigger = kzalloc_obj(*trigger, GFP_KERNEL); if (!trigger || !edata) { ret = -ENOMEM; goto error; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index b659653dc03a..1d5ce0244f8c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -976,7 +976,7 @@ static int cache_mod(struct trace_array *tr, const char *mod, int set, if (!set) return remove_cache_mod(tr, mod, match, system, event); - event_mod = kzalloc(sizeof(*event_mod), GFP_KERNEL); + event_mod = kzalloc_obj(*event_mod, GFP_KERNEL); if (!event_mod) return -ENOMEM; @@ -1648,7 +1648,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) struct set_event_iter *iter; loff_t l; - iter = kzalloc(sizeof(*iter), GFP_KERNEL); + iter = kzalloc_obj(*iter, GFP_KERNEL); mutex_lock(&event_mutex); if (!iter) return NULL; @@ -2206,7 +2206,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, if (*ppos) return 0; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return -ENOMEM; @@ -2320,7 +2320,7 @@ static int system_tr_open(struct inode *inode, struct file *filp) int ret; /* Make a temporary dir that has no system but points to tr */ - dir = kzalloc(sizeof(*dir), GFP_KERNEL); + dir = kzalloc_obj(*dir, GFP_KERNEL); if (!dir) return -ENOMEM; @@ -2366,7 +2366,7 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, if (*ppos) return 0; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return -ENOMEM; @@ -2416,7 +2416,7 @@ show_header_page_file(struct file *filp, char __user *ubuf, 
size_t cnt, loff_t * if (*ppos) return 0; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return -ENOMEM; @@ -2440,7 +2440,7 @@ show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t if (*ppos) return 0; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return -ENOMEM; @@ -2881,7 +2881,7 @@ create_new_subsystem(const char *name) struct event_subsystem *system; /* need to create new entry */ - system = kmalloc(sizeof(*system), GFP_KERNEL); + system = kmalloc_obj(*system, GFP_KERNEL); if (!system) return NULL; @@ -2892,7 +2892,7 @@ create_new_subsystem(const char *name) if (!system->name) goto out_free; - system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); + system->filter = kzalloc_obj(struct event_filter, GFP_KERNEL); if (!system->filter) goto out_free; @@ -2960,7 +2960,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, } } - dir = kmalloc(sizeof(*dir), GFP_KERNEL); + dir = kmalloc_obj(*dir, GFP_KERNEL); if (!dir) goto out_fail; @@ -3403,7 +3403,7 @@ static void add_str_to_module(struct module *module, char *str) { struct module_string *modstr; - modstr = kmalloc(sizeof(*modstr), GFP_KERNEL); + modstr = kmalloc_obj(*modstr, GFP_KERNEL); /* * If we failed to allocate memory here, then we'll just @@ -4365,7 +4365,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, goto out_put; ret = -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc_obj(*data, GFP_KERNEL); if (!data) goto out_put; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 7001e34476ee..b84bdad362e9 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -485,10 +485,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, nr_preds += 2; /* For TRUE and FALSE */ - op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); + op_stack = 
kmalloc_objs(*op_stack, nr_parens, GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); - prog_stack = kcalloc(nr_preds, sizeof(*prog_stack), GFP_KERNEL); + prog_stack = kzalloc_objs(*prog_stack, nr_preds, GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; @@ -1213,7 +1213,7 @@ static void append_filter_err(struct trace_array *tr, if (WARN_ON(!filter->filter_string)) return; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc_obj(*s, GFP_KERNEL); if (!s) return; trace_seq_init(s); @@ -1394,13 +1394,13 @@ static void try_delay_free_filter(struct event_filter *filter) struct filter_head *head; struct filter_list *item; - head = kmalloc(sizeof(*head), GFP_KERNEL); + head = kmalloc_obj(*head, GFP_KERNEL); if (!head) goto free_now; INIT_LIST_HEAD(&head->list); - item = kmalloc(sizeof(*item), GFP_KERNEL); + item = kmalloc_obj(*item, GFP_KERNEL); if (!item) { kfree(head); goto free_now; @@ -1442,7 +1442,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, struct filter_head *head; struct filter_list *item; - head = kmalloc(sizeof(*head), GFP_KERNEL); + head = kmalloc_obj(*head, GFP_KERNEL); if (!head) goto free_now; @@ -1451,7 +1451,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; - item = kmalloc(sizeof(*item), GFP_KERNEL); + item = kmalloc_obj(*item, GFP_KERNEL); if (!item) goto free_now; item->filter = event_filter(file); @@ -1459,7 +1459,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, event_clear_filter(file); } - item = kmalloc(sizeof(*item), GFP_KERNEL); + item = kmalloc_obj(*item, GFP_KERNEL); if (!item) goto free_now; @@ -1708,7 +1708,7 @@ static int parse_pred(const char *str, void *data, s = i; - pred = kzalloc(sizeof(*pred), GFP_KERNEL); + pred = kzalloc_obj(*pred, GFP_KERNEL); if (!pred) return -ENOMEM; @@ -1819,7 +1819,7 @@ static int parse_pred(const char *str, 
void *data, goto err_free; } - pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL); + pred->regex = kzalloc_obj(*pred->regex, GFP_KERNEL); if (!pred->regex) goto err_mem; pred->regex->len = len; @@ -1984,7 +1984,7 @@ static int parse_pred(const char *str, void *data, goto err_free; } - pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL); + pred->regex = kzalloc_obj(*pred->regex, GFP_KERNEL); if (!pred->regex) goto err_mem; pred->regex->len = len; @@ -2261,7 +2261,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, bool fail = true; int err; - filter_list = kmalloc(sizeof(*filter_list), GFP_KERNEL); + filter_list = kmalloc_obj(*filter_list, GFP_KERNEL); if (!filter_list) return -ENOMEM; @@ -2272,7 +2272,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, if (file->system != dir) continue; - filter = kzalloc(sizeof(*filter), GFP_KERNEL); + filter = kzalloc_obj(*filter, GFP_KERNEL); if (!filter) goto fail_mem; @@ -2289,7 +2289,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, event_set_filtered_flag(file); - filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); + filter_item = kzalloc_obj(*filter_item, GFP_KERNEL); if (!filter_item) goto fail_mem; @@ -2343,14 +2343,14 @@ static int create_filter_start(char *filter_string, bool set_str, if (WARN_ON_ONCE(*pse || *filterp)) return -EINVAL; - filter = kzalloc(sizeof(*filter), GFP_KERNEL); + filter = kzalloc_obj(*filter, GFP_KERNEL); if (filter && set_str) { filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) err = -ENOMEM; } - pe = kzalloc(sizeof(*pe), GFP_KERNEL); + pe = kzalloc_obj(*pe, GFP_KERNEL); if (!filter || !pe || err) { kfree(pe); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 768df987419e..da42a087d646 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -732,7 +732,7 @@ static struct track_data *track_data_alloc(unsigned int key_len, 
struct action_data *action_data, struct hist_trigger_data *hist_data) { - struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); + struct track_data *data = kzalloc_obj(*data, GFP_KERNEL); struct hist_elt_data *elt_data; if (!data) @@ -748,7 +748,7 @@ static struct track_data *track_data_alloc(unsigned int key_len, data->action_data = action_data; data->hist_data = hist_data; - elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); + elt_data = kzalloc_obj(*elt_data, GFP_KERNEL); if (!elt_data) { track_data_free(data); return ERR_PTR(-ENOMEM); @@ -1086,7 +1086,7 @@ static int save_hist_vars(struct hist_trigger_data *hist_data) if (tracing_check_open_get_tr(tr)) return -ENODEV; - var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); + var_data = kzalloc_obj(*var_data, GFP_KERNEL); if (!var_data) { trace_array_put(tr); return -ENOMEM; @@ -1548,7 +1548,7 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) struct hist_trigger_attrs *attrs; int ret = 0; - attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); + attrs = kzalloc_obj(*attrs, GFP_KERNEL); if (!attrs) return ERR_PTR(-ENOMEM); @@ -1646,7 +1646,7 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) struct hist_field *hist_field; unsigned int i, n_str; - elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); + elt_data = kzalloc_obj(*elt_data, GFP_KERNEL); if (!elt_data) return -ENOMEM; @@ -1962,7 +1962,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (field && is_function_field(field)) return NULL; - hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); + hist_field = kzalloc_obj(struct hist_field, GFP_KERNEL); if (!hist_field) return NULL; @@ -3049,7 +3049,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data, if (!IS_ERR_OR_NULL(event_var)) return event_var; - var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); + var_hist = kzalloc_obj(*var_hist, GFP_KERNEL); if (!var_hist) return ERR_PTR(-ENOMEM); @@ -3231,7 +3231,7 @@ 
static struct hist_field *create_var(struct hist_trigger_data *hist_data, goto out; } - var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); + var = kzalloc_obj(struct hist_field, GFP_KERNEL); if (!var) { var = ERR_PTR(-ENOMEM); goto out; @@ -3292,7 +3292,7 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data, goto err; } - field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); + field_var = kzalloc_obj(struct field_var, GFP_KERNEL); if (!field_var) { destroy_hist_field(val, 0); kfree_const(var->type); @@ -3831,7 +3831,7 @@ static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, int ret = -EINVAL; char *var_str; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc_obj(*data, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); @@ -4198,7 +4198,7 @@ static struct action_data *onmatch_parse(struct trace_array *tr, char *str) struct action_data *data; int ret = -EINVAL; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc_obj(*data, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); @@ -5136,7 +5136,7 @@ create_hist_data(unsigned int map_bits, struct hist_trigger_data *hist_data; int ret = 0; - hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); + hist_data = kzalloc_obj(*hist_data, GFP_KERNEL); if (!hist_data) return ERR_PTR(-ENOMEM); @@ -5674,8 +5674,8 @@ static int print_entries(struct seq_file *m, (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH))) continue; if (!stats) { - stats = kcalloc(hist_data->n_vals, sizeof(*stats), - GFP_KERNEL); + stats = kzalloc_objs(*stats, hist_data->n_vals, + GFP_KERNEL); if (!stats) { n_entries = -ENOMEM; goto out; @@ -5828,7 +5828,7 @@ static int event_hist_open(struct inode *inode, struct file *file) goto err; } - hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL); + hist_file = kzalloc_obj(*hist_file, GFP_KERNEL); if (!hist_file) { ret = -ENOMEM; goto err; @@ -6602,7 +6602,7 @@ static int hist_register_trigger(char *glob, data->private_data = 
named_data->private_data; set_named_trigger_data(data, named_data); /* Copy the command ops and update some of the functions */ - cmd_ops = kmalloc(sizeof(*cmd_ops), GFP_KERNEL); + cmd_ops = kmalloc_obj(*cmd_ops, GFP_KERNEL); if (!cmd_ops) { ret = -ENOMEM; goto out; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index ce42fbf16f4a..db74b2c663f8 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -711,7 +711,7 @@ static struct synth_field *parse_synth_field(int argc, char **argv, *field_version = check_field_version(prefix, field_type, field_name); - field = kzalloc(sizeof(*field), GFP_KERNEL); + field = kzalloc_obj(*field, GFP_KERNEL); if (!field) return ERR_PTR(-ENOMEM); @@ -819,7 +819,7 @@ static struct tracepoint *alloc_synth_tracepoint(char *name) { struct tracepoint *tp; - tp = kzalloc(sizeof(*tp), GFP_KERNEL); + tp = kzalloc_obj(*tp, GFP_KERNEL); if (!tp) return ERR_PTR(-ENOMEM); @@ -973,7 +973,7 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, unsigned int i, j, n_dynamic_fields = 0; struct synth_event *event; - event = kzalloc(sizeof(*event), GFP_KERNEL); + event = kzalloc_obj(*event, GFP_KERNEL); if (!event) { event = ERR_PTR(-ENOMEM); goto out; @@ -986,7 +986,7 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, goto out; } - event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); + event->fields = kzalloc_objs(*event->fields, n_fields, GFP_KERNEL); if (!event->fields) { free_synth_event(event); event = ERR_PTR(-ENOMEM); @@ -998,9 +998,9 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, n_dynamic_fields++; if (n_dynamic_fields) { - event->dynamic_fields = kcalloc(n_dynamic_fields, - sizeof(*event->dynamic_fields), - GFP_KERNEL); + event->dynamic_fields = kzalloc_objs(*event->dynamic_fields, + n_dynamic_fields, + GFP_KERNEL); if (!event->dynamic_fields) { free_synth_event(event); 
event = ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 7fa26327c9c7..7ba3548a2f60 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -914,7 +914,7 @@ struct event_trigger_data *trigger_data_alloc(struct event_command *cmd_ops, { struct event_trigger_data *trigger_data; - trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); + trigger_data = kzalloc_obj(*trigger_data, GFP_KERNEL); if (!trigger_data) return NULL; @@ -1724,7 +1724,7 @@ int event_enable_trigger_parse(struct event_command *cmd_ops, #endif ret = -ENOMEM; - enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL); + enable_data = kzalloc_obj(*enable_data, GFP_KERNEL); if (!enable_data) return ret; diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index dca6e50b3b21..c35182cb7286 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -370,7 +370,7 @@ static struct user_event_group *user_event_group_create(void) { struct user_event_group *group; - group = kzalloc(sizeof(*group), GFP_KERNEL); + group = kzalloc_obj(*group, GFP_KERNEL); if (!group) return NULL; @@ -637,7 +637,7 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig, if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig)))) return true; - enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT); + enabler = kzalloc_obj(*enabler, GFP_NOWAIT | __GFP_ACCOUNT); if (!enabler) return false; @@ -706,7 +706,7 @@ static struct user_event_mm *user_event_mm_alloc(struct task_struct *t) { struct user_event_mm *user_mm; - user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT); + user_mm = kzalloc_obj(*user_mm, GFP_KERNEL_ACCOUNT); if (!user_mm) return NULL; @@ -892,7 +892,7 @@ static struct user_event_enabler if (!user_mm) return NULL; - enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT); + enabler = kzalloc_obj(*enabler, GFP_KERNEL_ACCOUNT); if 
(!enabler) goto out; @@ -1113,7 +1113,7 @@ static int user_event_add_field(struct user_event *user, const char *type, struct ftrace_event_field *field; int validator_flags = 0; - field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT); + field = kmalloc_obj(*field, GFP_KERNEL_ACCOUNT); if (!field) return -ENOMEM; @@ -1132,7 +1132,7 @@ add_validator: if (strstr(type, "char") != NULL) validator_flags |= VALIDATOR_ENSURE_NULL; - validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT); + validator = kmalloc_obj(*validator, GFP_KERNEL_ACCOUNT); if (!validator) { kfree(field); @@ -2105,7 +2105,7 @@ static int user_event_parse(struct user_event_group *group, char *name, return 0; } - user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT); + user = kzalloc_obj(*user, GFP_KERNEL_ACCOUNT); if (!user) return -ENOMEM; @@ -2315,7 +2315,7 @@ static int user_events_open(struct inode *node, struct file *file) if (!group) return -ENOENT; - info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT); + info = kzalloc_obj(*info, GFP_KERNEL_ACCOUNT); if (!info) return -ENOMEM; diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index 262c0556e4af..7decd8383d67 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -99,7 +99,7 @@ static struct tracepoint_user *__tracepoint_user_init(const char *name, struct t struct tracepoint_user *tuser __free(tuser_free) = NULL; int ret; - tuser = kzalloc(sizeof(*tuser), GFP_KERNEL); + tuser = kzalloc_obj(*tuser, GFP_KERNEL); if (!tuser) return NULL; tuser->name = kstrdup(name, GFP_KERNEL); @@ -579,7 +579,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group, struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; int ret = -ENOMEM; - tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL); + tf = kzalloc_flex(*tf, tp.args, nargs, GFP_KERNEL); if (!tf) return ERR_PTR(ret); @@ -1403,7 +1403,7 @@ static int trace_fprobe_create_cb(int argc, const char *argv[]) struct traceprobe_parse_context *ctx 
__free(traceprobe_parse_context) = NULL; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index c12795c2fb39..a7e4ad088acf 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -61,7 +61,7 @@ int ftrace_allocate_ftrace_ops(struct trace_array *tr) if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return 0; - ops = kzalloc(sizeof(*ops), GFP_KERNEL); + ops = kzalloc_obj(*ops, GFP_KERNEL); if (!ops) return -ENOMEM; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 1de6f1573621..73f0479aeac0 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -434,7 +434,7 @@ int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops) { struct fgraph_ops *gops; - gops = kzalloc(sizeof(*gops), GFP_KERNEL); + gops = kzalloc_obj(*gops, GFP_KERNEL); if (!gops) return -ENOMEM; @@ -1613,7 +1613,7 @@ void graph_trace_open(struct trace_iterator *iter) /* We can be called in atomic context via ftrace_dump() */ gfpflags = (in_atomic() || irqs_disabled()) ? 
GFP_ATOMIC : GFP_KERNEL; - data = kzalloc(sizeof(*data), gfpflags); + data = kzalloc_obj(*data, gfpflags); if (!data) goto out_err; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b4f62d2e41ed..808b91873bd6 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -275,7 +275,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, struct trace_kprobe *tk __free(free_trace_kprobe) = NULL; int ret = -ENOMEM; - tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL); + tk = kzalloc_flex(*tk, tp.args, nargs, GFP_KERNEL); if (!tk) return ERR_PTR(ret); @@ -1082,7 +1082,7 @@ static int trace_kprobe_create_cb(int argc, const char *argv[]) struct traceprobe_parse_context *ctx __free(traceprobe_parse_context) = NULL; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->flags = TPARG_FL_KERNEL; diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index c706544be60c..1c752a691317 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -101,7 +101,7 @@ static void mmio_pipe_open(struct trace_iterator *iter) trace_seq_puts(s, "VERSION 20070824\n"); - hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); + hiter = kzalloc_obj(*hiter, GFP_KERNEL); if (!hiter) return; diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 827104d00bc0..51e7b0476a7f 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -122,7 +122,7 @@ static int osnoise_register_instance(struct trace_array *tr) */ lockdep_assert_held(&trace_types_lock); - inst = kmalloc(sizeof(*inst), GFP_KERNEL); + inst = kmalloc_obj(*inst, GFP_KERNEL); if (!inst) return -ENOMEM; diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 6a29e4350b55..05b61ec67622 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -69,7 +69,7 @@ void 
hold_module_trace_bprintk_format(const char **start, const char **end) } fmt = NULL; - tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); + tb_fmt = kmalloc_obj(*tb_fmt, GFP_KERNEL); if (tb_fmt) { fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); if (fmt) { diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 2f571083ce9e..fff0879cb0e9 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -838,12 +838,12 @@ static int __store_entry_arg(struct trace_probe *tp, int argnum) int i, offset, last_offset = 0; if (!earg) { - earg = kzalloc(sizeof(*tp->entry_arg), GFP_KERNEL); + earg = kzalloc_obj(*tp->entry_arg, GFP_KERNEL); if (!earg) return -ENOMEM; earg->size = 2 * tp->nr_args + 1; - earg->code = kcalloc(earg->size, sizeof(struct fetch_insn), - GFP_KERNEL); + earg->code = kzalloc_objs(struct fetch_insn, earg->size, + GFP_KERNEL); if (!earg->code) { kfree(earg); return -ENOMEM; @@ -1499,7 +1499,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, if (IS_ERR(type)) return PTR_ERR(type); - code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL); + code = tmp = kzalloc_objs(*code, FETCH_INSN_MAX, GFP_KERNEL); if (!code) return -ENOMEM; code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; @@ -1543,7 +1543,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, if (code->op == FETCH_OP_END) break; /* Shrink down the code buffer */ - parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL); + parg->code = kzalloc_objs(*code, code - tmp + 1, GFP_KERNEL); if (!parg->code) ret = -ENOMEM; else @@ -2149,7 +2149,7 @@ int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file) { struct event_file_link *link; - link = kmalloc(sizeof(*link), GFP_KERNEL); + link = kmalloc_obj(*link, GFP_KERNEL); if (!link) return -ENOMEM; diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c index a520b11afb0d..852069484060 100644 --- 
a/kernel/trace/trace_recursion_record.c +++ b/kernel/trace/trace_recursion_record.c @@ -129,7 +129,7 @@ static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos) ret = &recursed_functions[*pos]; } - tseq = kzalloc(sizeof(*tseq), GFP_KERNEL); + tseq = kzalloc_obj(*tseq, GFP_KERNEL); if (!tseq) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index c46d584ded3b..ded84f1d8121 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -444,8 +444,7 @@ int trace_alloc_tgid_map(void) return 0; tgid_map_max = init_pid_ns.pid_max; - map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map), - GFP_KERNEL); + map = kvzalloc_objs(*tgid_map, tgid_map_max + 1, GFP_KERNEL); if (!map) return -ENOMEM; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index be53fe6fee6a..43ed16b3b160 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -248,7 +248,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt) goto out; /* Add a dynamic probe */ - dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); + dyn_ops = kzalloc_obj(*dyn_ops, GFP_KERNEL); if (!dyn_ops) { printk("MEMORY ERROR "); goto out; diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index b3b5586f104d..3fec69e8a6d4 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -77,7 +77,7 @@ static int insert_stat(struct rb_root *root, void *stat, cmp_func_t cmp) struct rb_node **new = &(root->rb_node), *parent = NULL; struct stat_node *data; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = kzalloc_obj(*data, GFP_KERNEL); if (!data) return -ENOMEM; data->stat = stat; @@ -322,7 +322,7 @@ int register_stat_tracer(struct tracer_stat *trace) } /* Init the session */ - session = kzalloc(sizeof(*session), GFP_KERNEL); + session = kzalloc_obj(*session, GFP_KERNEL); if (!session) return -ENOMEM; diff --git a/kernel/trace/trace_syscalls.c 
b/kernel/trace/trace_syscalls.c index e96d0063cbcf..2f495e46034f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -617,7 +617,7 @@ static int syscall_fault_buffer_enable(void) return 0; } - sbuf = kmalloc(sizeof(*sbuf), GFP_KERNEL); + sbuf = kmalloc_obj(*sbuf, GFP_KERNEL); if (!sbuf) return -ENOMEM; @@ -1337,9 +1337,8 @@ void __init init_ftrace_syscalls(void) void *ret; if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) { - syscalls_metadata = kcalloc(NR_syscalls, - sizeof(*syscalls_metadata), - GFP_KERNEL); + syscalls_metadata = kzalloc_objs(*syscalls_metadata, + NR_syscalls, GFP_KERNEL); if (!syscalls_metadata) { WARN_ON(1); return; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 1b4f32e2b9bd..83c17b90daad 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -338,7 +338,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) struct trace_uprobe *tu; int ret; - tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL); + tu = kzalloc_flex(*tu, tp.args, nargs, GFP_KERNEL); if (!tu) return ERR_PTR(-ENOMEM); @@ -699,7 +699,7 @@ static int __trace_uprobe_create(int argc, const char **argv) memset(&path, 0, sizeof(path)); tu->filename = no_free_ptr(filename); - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->flags = (is_return ? 
TPARG_FL_RETURN : 0) | TPARG_FL_USER; diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 7f8da4dab69d..ef28c6c52295 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -324,7 +324,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, struct tracing_map_array *a; unsigned int i; - a = kzalloc(sizeof(*a), GFP_KERNEL); + a = kzalloc_obj(*a, GFP_KERNEL); if (!a) return NULL; @@ -405,7 +405,7 @@ static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) struct tracing_map_elt *elt; int err = 0; - elt = kzalloc(sizeof(*elt), GFP_KERNEL); + elt = kzalloc_obj(*elt, GFP_KERNEL); if (!elt) return ERR_PTR(-ENOMEM); @@ -417,19 +417,19 @@ static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) goto free; } - elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL); + elt->fields = kzalloc_objs(*elt->fields, map->n_fields, GFP_KERNEL); if (!elt->fields) { err = -ENOMEM; goto free; } - elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL); + elt->vars = kzalloc_objs(*elt->vars, map->n_vars, GFP_KERNEL); if (!elt->vars) { err = -ENOMEM; goto free; } - elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL); + elt->var_set = kzalloc_objs(*elt->var_set, map->n_vars, GFP_KERNEL); if (!elt->var_set) { err = -ENOMEM; goto free; @@ -777,7 +777,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits, map_bits > TRACING_MAP_BITS_MAX) return ERR_PTR(-EINVAL); - map = kzalloc(sizeof(*map), GFP_KERNEL); + map = kzalloc_obj(*map, GFP_KERNEL); if (!map) return ERR_PTR(-ENOMEM); @@ -949,7 +949,7 @@ create_sort_entry(void *key, struct tracing_map_elt *elt) { struct tracing_map_sort_entry *sort_entry; - sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL); + sort_entry = kzalloc_obj(*sort_entry, GFP_KERNEL); if (!sort_entry) return NULL; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index fd2ee879815c..8287a4ff3f18 100644 
--- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -103,8 +103,7 @@ static void tp_stub_func(void) static inline void *allocate_probes(int count) { - struct tp_probes *p = kmalloc(struct_size(p, probes, count), - GFP_KERNEL); + struct tp_probes *p = kmalloc_flex(*p, probes, count, GFP_KERNEL); return p == NULL ? NULL : p->probes; } @@ -615,7 +614,7 @@ static int tracepoint_module_coming(struct module *mod) if (trace_module_has_bad_taint(mod)) return 0; - tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); + tp_mod = kmalloc_obj(struct tp_module, GFP_KERNEL); if (!tp_mod) return -ENOMEM; tp_mod->mod = mod; diff --git a/kernel/ucount.c b/kernel/ucount.c index fc4a8f2d3096..d1f723805c6d 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -163,7 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) if (ucounts) return ucounts; - new = kzalloc(sizeof(*new), GFP_KERNEL); + new = kzalloc_obj(*new, GFP_KERNEL); if (!new) return NULL; diff --git a/kernel/umh.c b/kernel/umh.c index b4da45a3a7cf..cffda97d961c 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -359,7 +359,7 @@ struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv, void *data) { struct subprocess_info *sub_info; - sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); + sub_info = kzalloc_obj(struct subprocess_info, gfp_mask); if (!sub_info) goto out; diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c index a88fb481c4a3..23a7d7ea93d4 100644 --- a/kernel/unwind/deferred.c +++ b/kernel/unwind/deferred.c @@ -120,8 +120,8 @@ int unwind_user_faultable(struct unwind_stacktrace *trace) return -EINVAL; if (!info->cache) { - info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES), - GFP_KERNEL); + info->cache = kzalloc_flex(*cache, entries, UNWIND_MAX_ENTRIES, + GFP_KERNEL); if (!info->cache) return -ENOMEM; } diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 03cb63883d04..bb42a4c35dd3 100644 --- 
a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -794,9 +794,8 @@ static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent) struct uid_gid_extent *forward; /* Allocate memory for 340 mappings. */ - forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS, - sizeof(struct uid_gid_extent), - GFP_KERNEL); + forward = kmalloc_objs(struct uid_gid_extent, + UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL); if (!forward) return -ENOMEM; diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index 27107dcc1cbf..bf84af48dce8 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -132,7 +132,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), struct vhost_task *vtsk; struct task_struct *tsk; - vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL); + vtsk = kzalloc_obj(*vtsk, GFP_KERNEL); if (!vtsk) return ERR_PTR(-ENOMEM); init_completion(&vtsk->exited); diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 52f89f1137da..d966b8c99052 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -278,7 +278,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) pipe->nr_accounted = nr_pages; ret = -ENOMEM; - pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); + pages = kzalloc_objs(struct page *, nr_pages, GFP_KERNEL); if (!pages) goto error; @@ -358,7 +358,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, * user-specified filters. 
*/ ret = -ENOMEM; - wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL); + wfilter = kzalloc_flex(*wfilter, filters, nr_filter, GFP_KERNEL); if (!wfilter) goto err_filter; wfilter->nr_filters = nr_filter; @@ -692,7 +692,7 @@ int watch_queue_init(struct pipe_inode_info *pipe) { struct watch_queue *wqueue; - wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL); + wqueue = kzalloc_obj(*wqueue, GFP_KERNEL); if (!wqueue) return -ENOMEM; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c515cff01828..ee3e81133f78 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4714,7 +4714,7 @@ struct workqueue_attrs *alloc_workqueue_attrs_noprof(void) { struct workqueue_attrs *attrs; - attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); + attrs = kzalloc_obj(*attrs, GFP_KERNEL); if (!attrs) goto fail; if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) @@ -5370,7 +5370,7 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, attrs->affn_scope >= WQ_AFFN_NR_TYPES)) return ERR_PTR(-EINVAL); - ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); + ctx = kzalloc_flex(*ctx, pwq_tbl, nr_cpu_ids, GFP_KERNEL); new_attrs = alloc_workqueue_attrs(); if (!ctx || !new_attrs) @@ -7486,7 +7486,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; - wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); + wq->wq_dev = wq_dev = kzalloc_obj(*wq_dev, GFP_KERNEL); if (!wq_dev) return -ENOMEM; @@ -7879,9 +7879,9 @@ void __init workqueue_init_early(void) wq_power_efficient = true; /* initialize WQ_AFFN_SYSTEM pods */ - pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); - pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); - pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); + pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], 1, GFP_KERNEL); + pt->pod_node = kzalloc_objs(pt->pod_node[0], 1, GFP_KERNEL); + pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids, 
GFP_KERNEL); BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); @@ -8063,7 +8063,7 @@ static void __init init_pod_type(struct wq_pod_type *pt, pt->nr_pods = 0; /* init @pt->cpu_pod[] according to @cpus_share_pod() */ - pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); + pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids, GFP_KERNEL); BUG_ON(!pt->cpu_pod); for_each_possible_cpu(cur) { @@ -8080,8 +8080,8 @@ static void __init init_pod_type(struct wq_pod_type *pt, } /* init the rest to match @pt->cpu_pod[] */ - pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); - pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); + pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], pt->nr_pods, GFP_KERNEL); + pt->pod_node = kzalloc_objs(pt->pod_node[0], pt->nr_pods, GFP_KERNEL); BUG_ON(!pt->pod_cpus || !pt->pod_node); for (pod = 0; pod < pt->nr_pods; pod++) |
