Diffstat (limited to 'kernel')
141 files changed, 337 insertions, 337 deletions
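Every hunk below makes the same mechanical change: the kmalloc_obj()/kzalloc_obj()/kzalloc_objs()/kvmalloc_objs() helpers lose their explicit GFP_KERNEL argument, so callers name only the object (a type name or a dereferenced pointer) and, for the array variants, a count. The helper definitions themselves are not part of this diff (it is limited to kernel/), so the sketch below is an assumption about their post-patch shape, inferred from the call sites; the only thing the patch itself confirms is that the trailing gfp_t argument goes away.

/*
 * Illustrative sketch only -- these definitions are assumed, not taken from
 * this patch.  They show one plausible mapping of the object-based helpers
 * onto the classic size-based allocators from <linux/slab.h> once GFP_KERNEL
 * is implied.  "struct foo" and alloc_foo_example() are stand-in names.
 */
#define kmalloc_obj(obj)		kmalloc(sizeof(obj), GFP_KERNEL)
#define kzalloc_obj(obj)		kzalloc(sizeof(obj), GFP_KERNEL)
#define kvmalloc_obj(obj)		kvmalloc(sizeof(obj), GFP_KERNEL)
#define kmalloc_objs(obj, count)	kmalloc_array((count), sizeof(obj), GFP_KERNEL)
#define kzalloc_objs(obj, count)	kcalloc((count), sizeof(obj), GFP_KERNEL)
#define kvmalloc_objs(obj, count)	kvmalloc_array((count), sizeof(obj), GFP_KERNEL)
#define kvzalloc_objs(obj, count)	kvcalloc((count), sizeof(obj), GFP_KERNEL)

struct foo {
	int x;
};

/* Typical converted call site, mirroring the hunks below. */
static int alloc_foo_example(struct foo **out)
{
	struct foo *p;

	p = kzalloc_obj(*p);	/* was: kzalloc_obj(*p, GFP_KERNEL) */
	if (!p)
		return -ENOMEM;

	*out = p;
	return 0;
}

Every '-' line in this series passes GFP_KERNEL and nothing else, which is presumably why the flag could be folded into the helpers; call sites that need other gfp flags are untouched here and would keep using the size-based allocators (or a flag-taking variant not visible in this diff).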
diff --git a/kernel/acct.c b/kernel/acct.c index 06e8b79eaf7e..1e19722c64c3 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -255,7 +255,7 @@ static int acct_on(const char __user *name) if (!(file->f_mode & FMODE_CAN_WRITE)) return -EIO; - acct = kzalloc_obj(struct bsd_acct_struct, GFP_KERNEL); + acct = kzalloc_obj(struct bsd_acct_struct); if (!acct) return -ENOMEM; diff --git a/kernel/async.c b/kernel/async.c index 862532ad328a..0e3a783dc991 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -261,7 +261,7 @@ bool async_schedule_dev_nocall(async_func_t func, struct device *dev) { struct async_entry *entry; - entry = kzalloc_obj(struct async_entry, GFP_KERNEL); + entry = kzalloc_obj(struct async_entry); /* Give up if there is no memory or too much work. */ if (!entry || atomic_read(&entry_count) > MAX_WORK) { diff --git a/kernel/audit.c b/kernel/audit.c index 838ca1648f7b..ad46aa11d42c 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -545,7 +545,7 @@ static int auditd_set(struct pid *pid, u32 portid, struct net *net, if (!pid || !net) return -EINVAL; - ac_new = kzalloc_obj(*ac_new, GFP_KERNEL); + ac_new = kzalloc_obj(*ac_new); if (!ac_new) return -ENOMEM; ac_new->pid = get_pid(pid); @@ -1044,7 +1044,7 @@ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int struct task_struct *tsk; struct audit_reply *reply; - reply = kzalloc_obj(*reply, GFP_KERNEL); + reply = kzalloc_obj(*reply); if (!reply) return; diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 7b89e1ccb5a4..a4401f651060 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -89,7 +89,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa goto out; } - audit_mark = kzalloc_obj(*audit_mark, GFP_KERNEL); + audit_mark = kzalloc_obj(*audit_mark); if (unlikely(!audit_mark)) { audit_mark = ERR_PTR(-ENOMEM); goto out; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 6a73b30929c0..096faac2435c 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -139,7 +139,7 @@ static struct audit_parent *audit_init_parent(const struct path *path) struct audit_parent *parent; int ret; - parent = kzalloc_obj(*parent, GFP_KERNEL); + parent = kzalloc_obj(*parent); if (unlikely(!parent)) return ERR_PTR(-ENOMEM); @@ -161,7 +161,7 @@ static struct audit_watch *audit_init_watch(char *path) { struct audit_watch *watch; - watch = kzalloc_obj(*watch, GFP_KERNEL); + watch = kzalloc_obj(*watch); if (unlikely(!watch)) return ERR_PTR(-ENOMEM); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index e2d6f9a91a49..2bffaef0011b 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -108,11 +108,11 @@ static inline struct audit_entry *audit_init_entry(u32 field_count) struct audit_entry *entry; struct audit_field *fields; - entry = kzalloc_obj(*entry, GFP_KERNEL); + entry = kzalloc_obj(*entry); if (unlikely(!entry)) return NULL; - fields = kzalloc_objs(*fields, field_count, GFP_KERNEL); + fields = kzalloc_objs(*fields, field_count); if (unlikely(!fields)) { kfree(entry); return NULL; @@ -1180,7 +1180,7 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - dest = kmalloc_obj(*dest, GFP_KERNEL); + dest = kmalloc_obj(*dest); if (!dest) return -ENOMEM; dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index e45883de200f..f6af6a8f68c4 100644 --- a/kernel/auditsc.c +++ 
b/kernel/auditsc.c @@ -255,7 +255,7 @@ static int grow_tree_refs(struct audit_context *ctx) { struct audit_tree_refs *p = ctx->trees; - ctx->trees = kzalloc_obj(struct audit_tree_refs, GFP_KERNEL); + ctx->trees = kzalloc_obj(struct audit_tree_refs); if (!ctx->trees) { ctx->trees = p; return 0; @@ -1032,7 +1032,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state) { struct audit_context *context; - context = kzalloc_obj(*context, GFP_KERNEL); + context = kzalloc_obj(*context); if (!context) return NULL; context->context = AUDIT_CTX_UNUSED; @@ -2650,7 +2650,7 @@ int __audit_sockaddr(int len, void *a) struct audit_context *context = audit_context(); if (!context->sockaddr) { - void *p = kmalloc_obj(struct sockaddr_storage, GFP_KERNEL); + void *p = kmalloc_obj(struct sockaddr_storage); if (!p) return -ENOMEM; @@ -2743,7 +2743,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, struct audit_context *context = audit_context(); struct cpu_vfs_cap_data vcaps; - ax = kmalloc_obj(*ax, GFP_KERNEL); + ax = kmalloc_obj(*ax); if (!ax) return -ENOMEM; diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c index 5baea15cb07d..144f30e740e8 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -324,7 +324,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) { struct vma_list *vml; - vml = kmalloc_obj(*vml, GFP_KERNEL); + vml = kmalloc_obj(*vml); if (!vml) return -ENOMEM; refcount_set(&vml->mmap_count, 1); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 188b0e35f856..26763df6134a 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -1061,7 +1061,7 @@ static int prog_array_map_poke_track(struct bpf_map *map, goto out; } - elem = kmalloc_obj(*elem, GFP_KERNEL); + elem = kmalloc_obj(*elem); if (!elem) { ret = -ENOMEM; goto out; @@ -1237,7 +1237,7 @@ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, { struct bpf_event_entry *ee; - ee = kzalloc_obj(*ee, GFP_KERNEL); + ee = kzalloc_obj(*ee); if (ee) { ee->event = perf_file->private_data; ee->perf_file = perf_file; diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index b5d16050f7b3..f5eaeb2493d4 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -295,7 +295,7 @@ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info) { struct bpf_iter_target_info *tinfo; - tinfo = kzalloc_obj(*tinfo, GFP_KERNEL); + tinfo = kzalloc_obj(*tinfo); if (!tinfo) return -ENOMEM; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 1ff292a6f3ed..05b366b821c3 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -218,7 +218,7 @@ static int prepare_arg_info(struct btf *btf, args = btf_params(func_proto); stub_args = btf_params(stub_func_proto); - info_buf = kzalloc_objs(*info_buf, nargs, GFP_KERNEL); + info_buf = kzalloc_objs(*info_buf, nargs); if (!info_buf) return -ENOMEM; @@ -378,7 +378,7 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (!is_valid_value_type(btf, value_id, t, value_name)) return -EINVAL; - arg_info = kzalloc_objs(*arg_info, btf_type_vlen(t), GFP_KERNEL); + arg_info = kzalloc_objs(*arg_info, btf_type_vlen(t)); if (!arg_info) return -ENOMEM; @@ -720,7 +720,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (uvalue->common.state || refcount_read(&uvalue->common.refcnt)) return -EINVAL; - tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX, GFP_KERNEL); + tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX); if (!tlinks) return 
-ENOMEM; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee9037aa9ab7..319916f8fc64 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8306,7 +8306,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, switch (op) { case MODULE_STATE_COMING: - btf_mod = kzalloc_obj(*btf_mod, GFP_KERNEL); + btf_mod = kzalloc_obj(*btf_mod); if (!btf_mod) { err = -ENOMEM; goto out; @@ -8341,7 +8341,7 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, if (IS_ENABLED(CONFIG_SYSFS)) { struct bin_attribute *attr; - attr = kzalloc_obj(*attr, GFP_KERNEL); + attr = kzalloc_obj(*attr); if (!attr) goto out; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 5d7a35e476e9..876f6a81a9b6 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -845,7 +845,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp, if (pl) { old_prog = pl->prog; } else { - pl = kmalloc_obj(*pl, GFP_KERNEL); + pl = kmalloc_obj(*pl); if (!pl) { bpf_cgroup_storages_free(new_storage); return -ENOMEM; diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c index 2b0660c32c92..51f89cecefb4 100644 --- a/kernel/bpf/crypto.c +++ b/kernel/bpf/crypto.c @@ -68,7 +68,7 @@ int bpf_crypto_register_type(const struct bpf_crypto_type *type) goto unlock; } - node = kmalloc_obj(*node, GFP_KERNEL); + node = kmalloc_obj(*node); err = -ENOMEM; if (!node) goto unlock; @@ -176,7 +176,7 @@ bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz, goto err_module_put; } - ctx = kzalloc_obj(*ctx, GFP_KERNEL); + ctx = kzalloc_obj(*ctx); if (!ctx) { *err = -ENOMEM; goto err_module_put; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 42a692682f18..6eb6c82ed2ee 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3993,7 +3993,7 @@ __bpf_kfunc struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags) if (IS_ERR(key_ref)) return NULL; - bkey = kmalloc_obj(*bkey, GFP_KERNEL); + bkey = kmalloc_obj(*bkey); if (!bkey) { key_put(key_ref_to_ptr(key_ref)); return NULL; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index a111b0e9214e..25c06a011825 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -1044,7 +1044,7 @@ static int bpf_init_fs_context(struct fs_context *fc) { struct bpf_mount_opts *opts; - opts = kzalloc_obj(struct bpf_mount_opts, GFP_KERNEL); + opts = kzalloc_obj(struct bpf_mount_opts); if (!opts) return -ENOMEM; diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 7fcbbe0ad925..0ad97d643bf4 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -72,7 +72,7 @@ static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct bpf_offload_netdev *ondev; int err; - ondev = kzalloc_obj(*ondev, GFP_KERNEL); + ondev = kzalloc_obj(*ondev); if (!ondev) return -ENOMEM; @@ -777,7 +777,7 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) { struct bpf_offload_dev *offdev; - offdev = kzalloc_obj(*offdev, GFP_KERNEL); + offdev = kzalloc_obj(*offdev); if (!offdev) return ERR_PTR(-ENOMEM); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index b94565843f77..84db9e658e52 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -256,7 +256,7 @@ static int direct_ops_mod(struct bpf_trampoline *tr, void *addr, bool lock_direc */ static int direct_ops_alloc(struct bpf_trampoline *tr) { - tr->fops = kzalloc_obj(struct ftrace_ops, GFP_KERNEL); + tr->fops = kzalloc_obj(struct ftrace_ops); if (!tr->fops) return -ENOMEM; tr->fops->private = tr; @@ -342,7 +342,7 @@ 
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key, unsigned long ip) goto out; } } - tr = kzalloc_obj(*tr, GFP_KERNEL); + tr = kzalloc_obj(*tr); if (!tr) goto out; if (direct_ops_alloc(tr)) { @@ -446,7 +446,7 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a int kind; *total = 0; - tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX, GFP_KERNEL); + tlinks = kzalloc_objs(*tlinks, BPF_TRAMP_MAX); if (!tlinks) return ERR_PTR(-ENOMEM); @@ -569,7 +569,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size) void *image; int err = -ENOMEM; - im = kzalloc_obj(*im, GFP_KERNEL); + im = kzalloc_obj(*im); if (!im) goto out; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 63f05d90e708..bb12ba020649 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -22769,7 +22769,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) goto out_undo_insn; err = -ENOMEM; - func = kzalloc_objs(prog, env->subprog_cnt, GFP_KERNEL); + func = kzalloc_objs(prog, env->subprog_cnt); if (!func) goto out_undo_insn; diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 0449b062dd1c..a4337c9b5287 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -317,7 +317,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, return l; /* entry not found; create a new one */ - l = kzalloc_obj(struct cgroup_pidlist, GFP_KERNEL); + l = kzalloc_obj(struct cgroup_pidlist); if (!l) return l; @@ -352,7 +352,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, * show up until sometime later on. */ length = cgroup_task_count(cgrp); - array = kvmalloc_objs(pid_t, length, GFP_KERNEL); + array = kvmalloc_objs(pid_t, length); if (!array) return -ENOMEM; /* now, populate the array */ @@ -1237,7 +1237,7 @@ static int cgroup1_root_to_use(struct fs_context *fc) if (ctx->ns != &init_cgroup_ns) return -EPERM; - root = kzalloc_obj(*root, GFP_KERNEL); + root = kzalloc_obj(*root); if (!root) return -ENOMEM; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 7d220276d019..c14fbdc4cdbe 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1168,7 +1168,7 @@ static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) INIT_LIST_HEAD(tmp_links); for (i = 0; i < count; i++) { - link = kzalloc_obj(*link, GFP_KERNEL); + link = kzalloc_obj(*link); if (!link) { free_cgrp_cset_links(tmp_links); return -ENOMEM; @@ -1241,7 +1241,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, if (cset) return cset; - cset = kzalloc_obj(*cset, GFP_KERNEL); + cset = kzalloc_obj(*cset); if (!cset) return NULL; @@ -2350,7 +2350,7 @@ static int cgroup_init_fs_context(struct fs_context *fc) { struct cgroup_fs_context *ctx; - ctx = kzalloc_obj(struct cgroup_fs_context, GFP_KERNEL); + ctx = kzalloc_obj(struct cgroup_fs_context); if (!ctx) return -ENOMEM; @@ -4251,7 +4251,7 @@ static int cgroup_file_open(struct kernfs_open_file *of) struct cgroup_file_ctx *ctx; int ret; - ctx = kzalloc_obj(*ctx, GFP_KERNEL); + ctx = kzalloc_obj(*ctx); if (!ctx) return -ENOMEM; diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c index 8e7ffc205c3b..7308e9b02495 100644 --- a/kernel/cgroup/cpuset-v1.c +++ b/kernel/cgroup/cpuset-v1.c @@ -316,7 +316,7 @@ void cpuset1_hotplug_update_tasks(struct cpuset *cs, css_tryget_online(&cs->css)) { struct cpuset_remove_tasks_struct *s; - s = kzalloc_obj(*s, GFP_KERNEL); + s = kzalloc_obj(*s); if (WARN_ON_ONCE(!s)) { 
css_put(&cs->css); return; @@ -653,7 +653,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, if (!doms) goto done; - dattr = kmalloc_obj(struct sched_domain_attr, GFP_KERNEL); + dattr = kmalloc_obj(struct sched_domain_attr); if (dattr) { *dattr = SD_ATTR_INIT; update_domain_attr_tree(dattr, &top_cpuset); @@ -664,7 +664,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, goto done; } - csa = kmalloc_objs(cp, nr_cpusets(), GFP_KERNEL); + csa = kmalloc_objs(cp, nr_cpusets()); if (!csa) goto done; csn = 0; @@ -727,7 +727,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains, * The rest of the code, including the scheduler, can deal with * dattr==NULL case. No need to abort if alloc fails. */ - dattr = kmalloc_objs(struct sched_domain_attr, ndoms, GFP_KERNEL); + dattr = kmalloc_objs(struct sched_domain_attr, ndoms); for (nslot = 0, i = 0; i < csn; i++) { nslot_update = 0; diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 384d9d6e323b..9faf34377a88 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -536,7 +536,7 @@ static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs) /* Allocate base structure */ trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) : - kzalloc_obj(*cs, GFP_KERNEL); + kzalloc_obj(*cs); if (!trial) return NULL; @@ -791,7 +791,7 @@ static int generate_sched_domains(cpumask_var_t **domains, goto generate_doms; } - csa = kmalloc_objs(cp, nr_cpusets(), GFP_KERNEL); + csa = kmalloc_objs(cp, nr_cpusets()); if (!csa) goto done; @@ -835,7 +835,7 @@ generate_doms: * The rest of the code, including the scheduler, can deal with * dattr==NULL case. No need to abort if alloc fails. */ - dattr = kmalloc_objs(struct sched_domain_attr, ndoms, GFP_KERNEL); + dattr = kmalloc_objs(struct sched_domain_attr, ndoms); /* * Cgroup v2 doesn't support domain attributes, just set all of them @@ -2478,7 +2478,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, return; } - mwork = kzalloc_obj(*mwork, GFP_KERNEL); + mwork = kzalloc_obj(*mwork); if (mwork) { mwork->mm = mm; mwork->from = *from; @@ -2500,7 +2500,7 @@ static void schedule_flush_migrate_mm(void) { struct callback_head *flush_cb; - flush_cb = kzalloc_obj(struct callback_head, GFP_KERNEL); + flush_cb = kzalloc_obj(struct callback_head); if (!flush_cb) return; diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c index 78429dd9e9c6..883347b87842 100644 --- a/kernel/cgroup/debug.c +++ b/kernel/cgroup/debug.c @@ -14,7 +14,7 @@ static struct cgroup_subsys_state * debug_css_alloc(struct cgroup_subsys_state *parent_css) { - struct cgroup_subsys_state *css = kzalloc_obj(*css, GFP_KERNEL); + struct cgroup_subsys_state *css = kzalloc_obj(*css); if (!css) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c index 0c8c0a135231..9d95824dc6fa 100644 --- a/kernel/cgroup/dmem.c +++ b/kernel/cgroup/dmem.c @@ -222,7 +222,7 @@ static void dmemcs_free(struct cgroup_subsys_state *css) static struct cgroup_subsys_state * dmemcs_alloc(struct cgroup_subsys_state *parent_css) { - struct dmemcg_state *dmemcs = kzalloc_obj(*dmemcs, GFP_KERNEL); + struct dmemcg_state *dmemcs = kzalloc_obj(*dmemcs); if (!dmemcs) return ERR_PTR(-ENOMEM); @@ -521,7 +521,7 @@ struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *fmt if (!region_name) return ERR_PTR(-ENOMEM); - ret = kzalloc_obj(*ret, GFP_KERNEL); + ret = kzalloc_obj(*ret); if (!ret) { kfree(region_name); return ERR_PTR(-ENOMEM); @@ -597,7 +597,7 @@ 
get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) if (WARN_ON(allocpool)) continue; - allocpool = kzalloc_obj(*allocpool, GFP_KERNEL); + allocpool = kzalloc_obj(*allocpool); if (allocpool) { pool = NULL; continue; diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c index 85344b107873..8545e0d1ba3d 100644 --- a/kernel/cgroup/legacy_freezer.c +++ b/kernel/cgroup/legacy_freezer.c @@ -81,7 +81,7 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css) { struct freezer *freezer; - freezer = kzalloc_obj(struct freezer, GFP_KERNEL); + freezer = kzalloc_obj(struct freezer); if (!freezer) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c index 7c3ae3a76c8d..4a9e2557141c 100644 --- a/kernel/cgroup/misc.c +++ b/kernel/cgroup/misc.c @@ -445,7 +445,7 @@ misc_cg_alloc(struct cgroup_subsys_state *parent_css) if (!parent_css) { cg = &root_cg; } else { - cg = kzalloc_obj(*cg, GFP_KERNEL); + cg = kzalloc_obj(*cg); if (!cg) return ERR_PTR(-ENOMEM); } diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index 6221573fc6ad..ecbb839d2acb 100644 --- a/kernel/cgroup/pids.c +++ b/kernel/cgroup/pids.c @@ -80,7 +80,7 @@ pids_css_alloc(struct cgroup_subsys_state *parent) { struct pids_cgroup *pids; - pids = kzalloc_obj(struct pids_cgroup, GFP_KERNEL); + pids = kzalloc_obj(struct pids_cgroup); if (!pids) return ERR_PTR(-ENOMEM); diff --git a/kernel/cgroup/rdma.c b/kernel/cgroup/rdma.c index 9d3693574b11..09258eebb5c7 100644 --- a/kernel/cgroup/rdma.c +++ b/kernel/cgroup/rdma.c @@ -134,7 +134,7 @@ get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device) if (rpool) return rpool; - rpool = kzalloc_obj(*rpool, GFP_KERNEL); + rpool = kzalloc_obj(*rpool); if (!rpool) return ERR_PTR(-ENOMEM); @@ -443,7 +443,7 @@ static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of, goto err; } - new_limits = kzalloc_objs(int, RDMACG_RESOURCE_MAX, GFP_KERNEL); + new_limits = kzalloc_objs(int, RDMACG_RESOURCE_MAX); if (!new_limits) { ret = -ENOMEM; goto err; @@ -566,7 +566,7 @@ rdmacg_css_alloc(struct cgroup_subsys_state *parent) { struct rdma_cgroup *cg; - cg = kzalloc_obj(*cg, GFP_KERNEL); + cg = kzalloc_obj(*cg); if (!cg) return ERR_PTR(-ENOMEM); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 2146ca0f0ed8..2c1a3791e410 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -368,7 +368,7 @@ static int __crash_shrink_memory(struct resource *old_res, { struct resource *ram_res; - ram_res = kzalloc_obj(*ram_res, GFP_KERNEL); + ram_res = kzalloc_obj(*ram_res); if (!ram_res) return -ENOMEM; diff --git a/kernel/crash_dump_dm_crypt.c b/kernel/crash_dump_dm_crypt.c index 13191d7c7a32..1f4067fbdb94 100644 --- a/kernel/crash_dump_dm_crypt.c +++ b/kernel/crash_dump_dm_crypt.c @@ -252,7 +252,7 @@ static struct config_item *config_keys_make_item(struct config_group *group, return ERR_PTR(-EINVAL); } - config_key = kzalloc_obj(struct config_key, GFP_KERNEL); + config_key = kzalloc_obj(struct config_key); if (!config_key) return ERR_PTR(-ENOMEM); diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index d580ab6d2e33..1147497bc512 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -49,7 +49,7 @@ static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, if (!mem_base) return ERR_PTR(-EINVAL); - dma_mem = kzalloc_obj(struct dma_coherent_mem, GFP_KERNEL); + dma_mem = kzalloc_obj(struct dma_coherent_mem); if (!dma_mem) goto out_unmap_membase; dma_mem->bitmap = 
bitmap_zalloc(pages, GFP_KERNEL); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 3be263d7afd6..86f87e43438c 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -900,7 +900,7 @@ void dma_debug_add_bus(const struct bus_type *bus) if (dma_debug_disabled()) return; - nb = kzalloc_obj(struct notifier_block, GFP_KERNEL); + nb = kzalloc_obj(struct notifier_block); if (nb == NULL) { pr_err("dma_debug_add_bus: out of memory\n"); return; diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 280ec952c5e1..8f43a930716d 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -654,7 +654,7 @@ int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, if (!offset) return 0; - map = kzalloc_objs(*map, 2, GFP_KERNEL); + map = kzalloc_objs(*map, 2); if (!map) return -ENOMEM; map[0].cpu_start = cpu_start; diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index 48ab3d957960..0f33b3ea7daf 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -121,7 +121,7 @@ static int do_map_benchmark(struct map_benchmark_data *map) int ret = 0; int i; - tsk = kmalloc_objs(*tsk, threads, GFP_KERNEL); + tsk = kmalloc_objs(*tsk, threads); if (!tsk) return -ENOMEM; diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index b53e66417e5f..205c0c0ba2fe 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -45,7 +45,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i; - pages = kvmalloc_objs(struct page *, count, GFP_KERNEL); + pages = kvmalloc_objs(struct page *, count); if (!pages) return NULL; for (i = 0; i < count; i++) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index cb8efc059e6a..d8e6f1d889d5 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1809,18 +1809,18 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, if (!mem) { struct io_tlb_pool *pool; - mem = kzalloc_obj(*mem, GFP_KERNEL); + mem = kzalloc_obj(*mem); if (!mem) return -ENOMEM; pool = &mem->defpool; - pool->slots = kzalloc_objs(*pool->slots, nslabs, GFP_KERNEL); + pool->slots = kzalloc_objs(*pool->slots, nslabs); if (!pool->slots) { kfree(mem); return -ENOMEM; } - pool->areas = kzalloc_objs(*pool->areas, nareas, GFP_KERNEL); + pool->areas = kzalloc_objs(*pool->areas, nareas); if (!pool->areas) { kfree(pool->slots); kfree(mem); diff --git a/kernel/events/core.c b/kernel/events/core.c index 33c84a605799..ac70d68217b6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5058,7 +5058,7 @@ alloc_perf_context(struct task_struct *task) { struct perf_event_context *ctx; - ctx = kzalloc_obj(struct perf_event_context, GFP_KERNEL); + ctx = kzalloc_obj(struct perf_event_context); if (!ctx) return NULL; @@ -5198,7 +5198,7 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, return epc; } - new = kzalloc_obj(*epc, GFP_KERNEL); + new = kzalloc_obj(*epc); if (!new) return ERR_PTR(-ENOMEM); @@ -5374,7 +5374,7 @@ alloc_perf_ctx_data(struct kmem_cache *ctx_cache, bool global) { struct perf_ctx_data *cd; - cd = kzalloc_obj(*cd, GFP_KERNEL); + cd = kzalloc_obj(*cd); if (!cd) return NULL; @@ -11111,7 +11111,7 @@ static int swevent_hlist_get_cpu(int cpu) cpumask_test_cpu(cpu, perf_online_mask)) { struct swevent_hlist *hlist; - hlist = kzalloc_obj(*hlist, GFP_KERNEL); + hlist = kzalloc_obj(*hlist); if (!hlist) { err = -ENOMEM; goto exit; @@ -12634,7 +12634,7 @@ static int pmu_dev_alloc(struct pmu *pmu) { int ret = -ENOMEM; - pmu->dev = kzalloc_obj(struct device, GFP_KERNEL); + pmu->dev = 
kzalloc_obj(struct device); if (!pmu->dev) goto out; @@ -15269,7 +15269,7 @@ perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct perf_cgroup *jc; - jc = kzalloc_obj(*jc, GFP_KERNEL); + jc = kzalloc_obj(*jc); if (!jc) return ERR_PTR(-ENOMEM); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d39dcc19d21e..923b24b321cc 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -238,7 +238,7 @@ static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) if (delayed_uprobe_check(uprobe, mm)) return 0; - du = kzalloc_obj(*du, GFP_KERNEL); + du = kzalloc_obj(*du); if (!du) return -ENOMEM; @@ -994,7 +994,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, { struct uprobe *uprobe, *cur_uprobe; - uprobe = kzalloc_obj(struct uprobe, GFP_KERNEL); + uprobe = kzalloc_obj(struct uprobe); if (!uprobe) return ERR_PTR(-ENOMEM); @@ -1252,7 +1252,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register) } do { - info = kmalloc_obj(struct map_info, GFP_KERNEL); + info = kmalloc_obj(struct map_info); if (!info) { curr = ERR_PTR(-ENOMEM); goto out; @@ -1755,7 +1755,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) struct xol_area *area; void *insns; - area = kzalloc_obj(*area, GFP_KERNEL); + area = kzalloc_obj(*area); if (unlikely(!area)) goto out; @@ -2069,7 +2069,7 @@ static struct uprobe_task *alloc_utask(void) { struct uprobe_task *utask; - utask = kzalloc_obj(*utask, GFP_KERNEL); + utask = kzalloc_obj(*utask); if (!utask) return NULL; @@ -2102,7 +2102,7 @@ static struct return_instance *alloc_return_instance(struct uprobe_task *utask) if (ri) return ri; - ri = kzalloc_obj(*ri, GFP_KERNEL); + ri = kzalloc_obj(*ri); if (!ri) return ZERO_SIZE_PTR; diff --git a/kernel/fail_function.c b/kernel/fail_function.c index 18993fcbdbda..2eaf55005f49 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -57,7 +57,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr) { struct fei_attr *attr; - attr = kzalloc_obj(*attr, GFP_KERNEL); + attr = kzalloc_obj(*attr); if (attr) { attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL); if (!attr->kp.symbol_name) { diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c index a73b6c713d83..bc1f7e83a37e 100644 --- a/kernel/futex/pi.c +++ b/kernel/futex/pi.c @@ -17,7 +17,7 @@ int refill_pi_state_cache(void) if (likely(current->pi_state_cache)) return 0; - pi_state = kzalloc_obj(*pi_state, GFP_KERNEL); + pi_state = kzalloc_obj(*pi_state); if (!pi_state) return -ENOMEM; diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c index aec0495adabe..743c7a728237 100644 --- a/kernel/futex/syscalls.c +++ b/kernel/futex/syscalls.c @@ -333,7 +333,7 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters, if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; - futexv = kzalloc_objs(*futexv, nr_futexes, GFP_KERNEL); + futexv = kzalloc_objs(*futexv, nr_futexes); if (!futexv) { ret = -ENOMEM; goto destroy_timer; diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index 4cfdeb2c9dc2..fd98ced0e51d 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -81,7 +81,7 @@ static LIST_HEAD(clang_gcov_list); void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush) { - struct gcov_info *info = kzalloc_obj(*info, GFP_KERNEL); + struct gcov_info *info = kzalloc_obj(*info); if (!info) return; @@ -112,7 +112,7 @@ EXPORT_SYMBOL(llvm_gcda_start_file); void llvm_gcda_emit_function(u32 
ident, u32 func_checksum, u32 cfg_checksum) { - struct gcov_fn_info *info = kzalloc_obj(*info, GFP_KERNEL); + struct gcov_fn_info *info = kzalloc_obj(*info); if (!info) return; diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index 8430f5cd21b6..2acf677171b1 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -482,7 +482,7 @@ static void add_links(struct gcov_node *node, struct dentry *parent) for (num = 0; gcov_link[num].ext; num++) /* Nothing. */; - node->links = kzalloc_objs(struct dentry *, num, GFP_KERNEL); + node->links = kzalloc_objs(struct dentry *, num); if (!node->links) return; for (i = 0; i < num; i++) { @@ -731,7 +731,7 @@ static void add_info(struct gcov_node *node, struct gcov_info *info) * case the new data set is incompatible, the node only contains * unloaded data sets and there's not enough memory for the array. */ - loaded_info = kzalloc_objs(struct gcov_info *, num + 1, GFP_KERNEL); + loaded_info = kzalloc_objs(struct gcov_info *, num + 1); if (!loaded_info) { pr_warn("could not add '%s' (out of memory)\n", gcov_info_filename(info)); diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index cf6729888ee3..85c45cfe7223 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -56,7 +56,7 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd) if (!affvecs) return NULL; - masks = kzalloc_objs(*masks, nvecs, GFP_KERNEL); + masks = kzalloc_objs(*masks, nvecs); if (!masks) return NULL; diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index 59b84a10465c..44747754530d 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -148,7 +148,7 @@ static int irq_sim_domain_map(struct irq_domain *domain, struct irq_sim_work_ctx *work_ctx = domain->host_data; struct irq_sim_irq_ctx *irq_ctx; - irq_ctx = kzalloc_obj(*irq_ctx, GFP_KERNEL); + irq_ctx = kzalloc_obj(*irq_ctx); if (!irq_ctx) return -ENOMEM; @@ -202,7 +202,7 @@ struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode, void *data) { struct irq_sim_work_ctx *work_ctx __free(kfree) = - kzalloc_obj(*work_ctx, GFP_KERNEL); + kzalloc_obj(*work_ctx); if (!work_ctx) return ERR_PTR(-ENOMEM); diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index ddc9d01b3091..7173b8b634f2 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -969,7 +969,7 @@ int irq_set_percpu_devid(unsigned int irq) if (!desc || desc->percpu_enabled) return -EINVAL; - desc->percpu_enabled = kzalloc_obj(*desc->percpu_enabled, GFP_KERNEL); + desc->percpu_enabled = kzalloc_obj(*desc->percpu_enabled); if (!desc->percpu_enabled) return -ENOMEM; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 857fcd74ebda..cc93abf009e8 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -92,7 +92,7 @@ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, struct irqchip_fwid *fwid; char *n; - fwid = kzalloc_obj(*fwid, GFP_KERNEL); + fwid = kzalloc_obj(*fwid); switch (type) { case IRQCHIP_FWNODE_NAMED: diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 2b05c45be1b3..2e8072437826 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1332,7 +1332,7 @@ static int irq_setup_forced_threading(struct irqaction *new) */ if (new->handler && new->thread_fn) { /* Allocate the secondary action */ - new->secondary = kzalloc_obj(struct irqaction, GFP_KERNEL); + new->secondary = kzalloc_obj(struct irqaction); if (!new->secondary) return -ENOMEM; new->secondary->handler = irq_forced_secondary_handler; @@ -2156,7 +2156,7 @@ int 
request_threaded_irq(unsigned int irq, irq_handler_t handler, handler = irq_default_primary_handler; } - action = kzalloc_obj(struct irqaction, GFP_KERNEL); + action = kzalloc_obj(struct irqaction); if (!action) return -ENOMEM; @@ -2486,7 +2486,7 @@ struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long f if (!affinity) affinity = cpu_possible_mask; - action = kzalloc_obj(struct irqaction, GFP_KERNEL); + action = kzalloc_obj(struct irqaction); if (!action) return NULL; diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index e4bae8f1c414..3cafa40e6ce3 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -76,7 +76,7 @@ static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec, const struct irq_affinity_desc *affinity) { - struct msi_desc *desc = kzalloc_obj(*desc, GFP_KERNEL); + struct msi_desc *desc = kzalloc_obj(*desc); if (!desc) return NULL; @@ -530,7 +530,7 @@ static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) struct device_attribute *attrs; int ret, i; - attrs = kzalloc_objs(*attrs, desc->nvec_used, GFP_KERNEL); + attrs = kzalloc_objs(*attrs, desc->nvec_used); if (!attrs) return -ENOMEM; diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c index d2aeb6b7c393..8f6c4e9b3a1c 100644 --- a/kernel/kallsyms_selftest.c +++ b/kernel/kallsyms_selftest.c @@ -264,7 +264,7 @@ static int test_kallsyms_basic_function(void) char namebuf[KSYM_NAME_LEN]; struct test_stat *stat, *stat2; - stat = kmalloc_objs(*stat, 2, GFP_KERNEL); + stat = kmalloc_objs(*stat, 2); if (!stat) return -ENOMEM; stat2 = stat + 1; diff --git a/kernel/kcov.c b/kernel/kcov.c index b9d4db7ea439..0b369e88c7c9 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -527,7 +527,7 @@ static int kcov_open(struct inode *inode, struct file *filep) { struct kcov *kcov; - kcov = kzalloc_obj(*kcov, GFP_KERNEL); + kcov = kzalloc_obj(*kcov); if (!kcov) return -ENOMEM; guard(spinlock_init)(&kcov->lock); diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index edb062fb43b4..79e655ea4ca1 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -168,7 +168,7 @@ static bool __report_matches(const struct expect_report *r) if (!report_available()) return false; - expect = kmalloc_obj(observed.lines, GFP_KERNEL); + expect = kmalloc_obj(observed.lines); if (WARN_ON(!expect)) return false; @@ -1538,7 +1538,7 @@ static int test_init(struct kunit *test) if (WARN_ON(!nthreads)) goto err; - threads = kzalloc_objs(struct task_struct *, nthreads + 1, GFP_KERNEL); + threads = kzalloc_objs(struct task_struct *, nthreads + 1); if (WARN_ON(!threads)) goto err; diff --git a/kernel/kexec.c b/kernel/kexec.c index 3902e7bb99fe..90756dc6339b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -284,7 +284,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) return -EINVAL; - ksegments = kmalloc_objs(ksegments[0], nr_segments, GFP_KERNEL); + ksegments = kmalloc_objs(ksegments[0], nr_segments); if (!ksegments) return -ENOMEM; diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 76e4287a4f1d..2fea396d29b9 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -231,7 +231,7 @@ struct kimage *do_kimage_alloc_init(void) struct kimage *image; /* Allocate a controlling structure */ - image = kzalloc_obj(*image, GFP_KERNEL); + image = kzalloc_obj(*image); if (!image) return NULL; @@ -975,7 +975,7 @@ void 
*kimage_map_segment(struct kimage *image, int idx) * Collect the source pages and map them in a contiguous VA range. */ npages = PFN_UP(eaddr) - PFN_DOWN(addr); - src_pages = kmalloc_objs(*src_pages, npages, GFP_KERNEL); + src_pages = kmalloc_objs(*src_pages, npages); if (!src_pages) { pr_err("Could not allocate ima pages array.\n"); return NULL; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b6744137b11e..15cc289a6fdf 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -900,7 +900,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { struct optimized_kprobe *op; - op = kzalloc_obj(struct optimized_kprobe, GFP_KERNEL); + op = kzalloc_obj(struct optimized_kprobe); if (!op) return NULL; @@ -1117,7 +1117,7 @@ static void free_aggr_kprobe(struct kprobe *p) static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { - return kzalloc_obj(struct kprobe, GFP_KERNEL); + return kzalloc_obj(struct kprobe); } #endif /* CONFIG_OPTPROBES */ @@ -2295,7 +2295,7 @@ int register_kretprobe(struct kretprobe *rp) rp->rh = NULL; } #else /* !CONFIG_KRETPROBE_ON_RETHOOK */ - rp->rph = kzalloc_obj(struct kretprobe_holder, GFP_KERNEL); + rp->rph = kzalloc_obj(struct kretprobe_holder); if (!rp->rph) return -ENOMEM; @@ -2499,7 +2499,7 @@ int kprobe_add_ksym_blacklist(unsigned long entry) !kallsyms_lookup_size_offset(entry, &size, &offset)) return -EINVAL; - ent = kmalloc_obj(*ent, GFP_KERNEL); + ent = kmalloc_obj(*ent); if (!ent) return -ENOMEM; ent->start_addr = entry; diff --git a/kernel/kthread.c b/kernel/kthread.c index 0b4f7328096f..20451b624b67 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -122,7 +122,7 @@ bool set_kthread_struct(struct task_struct *p) if (WARN_ON_ONCE(to_kthread(p))) return false; - kthread = kzalloc_obj(*kthread, GFP_KERNEL); + kthread = kzalloc_obj(*kthread); if (!kthread) return false; @@ -511,7 +511,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), { DECLARE_COMPLETION_ONSTACK(done); struct task_struct *task; - struct kthread_create_info *create = kmalloc_obj(*create, GFP_KERNEL); + struct kthread_create_info *create = kmalloc_obj(*create); if (!create) return ERR_PTR(-ENOMEM); @@ -1083,7 +1083,7 @@ __kthread_create_worker_on_node(unsigned int flags, int node, struct kthread_worker *worker; struct task_struct *task; - worker = kzalloc_obj(*worker, GFP_KERNEL); + worker = kzalloc_obj(*worker); if (!worker) return ERR_PTR(-ENOMEM); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 0d52e48918eb..28d15ba58a26 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -525,7 +525,7 @@ static struct klp_object *klp_alloc_object_dynamic(const char *name, { struct klp_object *obj; - obj = kzalloc_obj(*obj, GFP_KERNEL); + obj = kzalloc_obj(*obj); if (!obj) return NULL; @@ -554,7 +554,7 @@ static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func, { struct klp_func *func; - func = kzalloc_obj(*func, GFP_KERNEL); + func = kzalloc_obj(*func); if (!func) return NULL; diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 1149840cd538..3f54a017bbf6 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -179,7 +179,7 @@ static int klp_patch_func(struct klp_func *func) return -EINVAL; } - ops = kzalloc_obj(*ops, GFP_KERNEL); + ops = kzalloc_obj(*ops); if (!ops) return -ENOMEM; diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c index 23d76678d233..cc68a3692905 100644 --- a/kernel/liveupdate/kexec_handover.c +++ 
b/kernel/liveupdate/kexec_handover.c @@ -187,7 +187,7 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn, if (!physxa) { int err; - new_physxa = kzalloc_obj(*physxa, GFP_KERNEL); + new_physxa = kzalloc_obj(*physxa); if (!new_physxa) return -ENOMEM; @@ -1090,7 +1090,7 @@ void *kho_restore_vmalloc(const struct kho_vmalloc *preservation) return NULL; total_pages = preservation->total_pages; - pages = kvmalloc_objs(*pages, total_pages, GFP_KERNEL); + pages = kvmalloc_objs(*pages, total_pages); if (!pages) return NULL; order = preservation->order; diff --git a/kernel/liveupdate/kexec_handover_debugfs.c b/kernel/liveupdate/kexec_handover_debugfs.c index d42fc940d14d..2f93939168ab 100644 --- a/kernel/liveupdate/kexec_handover_debugfs.c +++ b/kernel/liveupdate/kexec_handover_debugfs.c @@ -29,7 +29,7 @@ static int __kho_debugfs_fdt_add(struct list_head *list, struct dentry *dir, struct fdt_debugfs *f; struct dentry *file; - f = kmalloc_obj(*f, GFP_KERNEL); + f = kmalloc_obj(*f); if (!f) return -ENOMEM; diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c index ca96edb3b4e5..8c79058253e1 100644 --- a/kernel/liveupdate/luo_file.c +++ b/kernel/liveupdate/luo_file.c @@ -289,7 +289,7 @@ int luo_preserve_file(struct luo_file_set *file_set, u64 token, int fd) if (err) goto err_free_files_mem; - luo_file = kzalloc_obj(*luo_file, GFP_KERNEL); + luo_file = kzalloc_obj(*luo_file); if (!luo_file) { err = -ENOMEM; goto err_flb_unpreserve; @@ -780,7 +780,7 @@ int luo_file_deserialize(struct luo_file_set *file_set, return -ENOENT; } - luo_file = kzalloc_obj(*luo_file, GFP_KERNEL); + luo_file = kzalloc_obj(*luo_file); if (!luo_file) return -ENOMEM; diff --git a/kernel/liveupdate/luo_flb.c b/kernel/liveupdate/luo_flb.c index 5f2cdf9caa7b..f52e8114837e 100644 --- a/kernel/liveupdate/luo_flb.c +++ b/kernel/liveupdate/luo_flb.c @@ -343,7 +343,7 @@ int liveupdate_register_flb(struct liveupdate_file_handler *fh, if (WARN_ON(list_empty(&ACCESS_PRIVATE(fh, list)))) return -EINVAL; - link = kzalloc_obj(*link, GFP_KERNEL); + link = kzalloc_obj(*link); if (!link) return -ENOMEM; diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c index c0262ca00533..783677295640 100644 --- a/kernel/liveupdate/luo_session.c +++ b/kernel/liveupdate/luo_session.c @@ -119,7 +119,7 @@ static struct luo_session_global luo_session_global = { static struct luo_session *luo_session_alloc(const char *name) { - struct luo_session *session = kzalloc_obj(*session, GFP_KERNEL); + struct luo_session *session = kzalloc_obj(*session); if (!session) return ERR_PTR(-ENOMEM); diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 2cc6d1937670..838d631544ed 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -324,7 +324,7 @@ static int __test_cycle(struct ww_class *class, unsigned int nthreads) unsigned int n, last = nthreads - 1; int ret; - cycles = kmalloc_objs(*cycles, nthreads, GFP_KERNEL); + cycles = kmalloc_objs(*cycles, nthreads); if (!cycles) return -ENOMEM; @@ -412,7 +412,7 @@ static int *get_random_order(int count) int *order; int n, r; - order = kmalloc_objs(*order, count, GFP_KERNEL); + order = kmalloc_objs(*order, count); if (!order) return order; @@ -506,7 +506,7 @@ static void stress_reorder_work(struct work_struct *work) return; for (n = 0; n < stress->nlocks; n++) { - ll = kmalloc_obj(*ll, GFP_KERNEL); + ll = kmalloc_obj(*ll); if (!ll) goto out; @@ -582,11 +582,11 @@ static int stress(struct ww_class *class, 
int nlocks, int nthreads, unsigned int struct stress *stress_array; int n, count; - locks = kmalloc_objs(*locks, nlocks, GFP_KERNEL); + locks = kmalloc_objs(*locks, nlocks); if (!locks) return -ENOMEM; - stress_array = kmalloc_objs(*stress_array, nthreads, GFP_KERNEL); + stress_array = kmalloc_objs(*stress_array, nthreads); if (!stress_array) { kfree(locks); return -ENOMEM; diff --git a/kernel/module/dups.c b/kernel/module/dups.c index bbc72ad93058..1d720a5311ba 100644 --- a/kernel/module/dups.c +++ b/kernel/module/dups.c @@ -125,7 +125,7 @@ bool kmod_dup_request_exists_wait(char *module_name, bool wait, int *dup_ret) * Pre-allocate the entry in case we have to use it later * to avoid contention with the mutex. */ - new_kmod_req = kzalloc_obj(*new_kmod_req, GFP_KERNEL); + new_kmod_req = kzalloc_obj(*new_kmod_req); if (!new_kmod_req) return false; diff --git a/kernel/module/main.c b/kernel/module/main.c index b2ac20299915..2bac4c7cd019 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3024,7 +3024,7 @@ static noinline int do_init_module(struct module *mod) } #endif - freeinit = kmalloc_obj(*freeinit, GFP_KERNEL); + freeinit = kmalloc_obj(*freeinit); if (!freeinit) { ret = -ENOMEM; goto fail; diff --git a/kernel/module/stats.c b/kernel/module/stats.c index 2fc64f2729e6..3a9672f93a8e 100644 --- a/kernel/module/stats.c +++ b/kernel/module/stats.c @@ -250,7 +250,7 @@ int try_add_failed_module(const char *name, enum fail_dup_mod_reason reason) } } - mod_fail = kzalloc_obj(*mod_fail, GFP_KERNEL); + mod_fail = kzalloc_obj(*mod_fail); if (!mod_fail) return -ENOMEM; memcpy(mod_fail->name, name, strlen(name)); diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c index 734ea3180478..768f74e99026 100644 --- a/kernel/module/sysfs.c +++ b/kernel/module/sysfs.c @@ -78,7 +78,7 @@ static int add_sect_attrs(struct module *mod, const struct load_info *info) if (!sect_attrs) return -ENOMEM; - gattr = kzalloc_objs(*gattr, nloaded + 1, GFP_KERNEL); + gattr = kzalloc_objs(*gattr, nloaded + 1); if (!gattr) { kfree(sect_attrs); return -ENOMEM; @@ -170,7 +170,7 @@ static int add_notes_attrs(struct module *mod, const struct load_info *info) if (!notes_attrs) return -ENOMEM; - gattr = kzalloc_objs(*gattr, notes + 1, GFP_KERNEL); + gattr = kzalloc_objs(*gattr, notes + 1); if (!gattr) { kfree(notes_attrs); return -ENOMEM; diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c index 41425054a97a..9033ff54c4e2 100644 --- a/kernel/module/tracking.c +++ b/kernel/module/tracking.c @@ -33,7 +33,7 @@ int try_add_tainted_module(struct module *mod) } } - mod_taint = kmalloc_obj(*mod_taint, GFP_KERNEL); + mod_taint = kmalloc_obj(*mod_taint); if (unlikely(!mod_taint)) return -ENOMEM; strscpy(mod_taint->name, mod->name, MODULE_NAME_LEN); diff --git a/kernel/padata.c b/kernel/padata.c index f0bf62e9a1f2..0af32c78ea69 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -540,7 +540,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps) struct padata_instance *pinst = ps->pinst; struct parallel_data *pd; - pd = kzalloc_obj(struct parallel_data, GFP_KERNEL); + pd = kzalloc_obj(struct parallel_data); if (!pd) goto err; @@ -952,7 +952,7 @@ struct padata_instance *padata_alloc(const char *name) { struct padata_instance *pinst; - pinst = kzalloc_obj(struct padata_instance, GFP_KERNEL); + pinst = kzalloc_obj(struct padata_instance); if (!pinst) goto err; @@ -1038,7 +1038,7 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst) struct parallel_data *pd; struct 
padata_shell *ps; - ps = kzalloc_obj(*ps, GFP_KERNEL); + ps = kzalloc_obj(*ps); if (!ps) goto out; diff --git a/kernel/params.c b/kernel/params.c index d26bdfae96e5..5d1cd7d0b51a 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -633,7 +633,7 @@ static __init_or_module int add_sysfs_param(struct module_kobject *mk, if (!mk->mp) { /* First allocation. */ - mk->mp = kzalloc_obj(*mk->mp, GFP_KERNEL); + mk->mp = kzalloc_obj(*mk->mp); if (!mk->mp) return -ENOMEM; mk->mp->grp.name = "parameters"; @@ -766,7 +766,7 @@ lookup_or_create_module_kobject(const char *name) if (kobj) return to_module_kobject(kobj); - mk = kzalloc_obj(struct module_kobject, GFP_KERNEL); + mk = kzalloc_obj(struct module_kobject); if (!mk) return NULL; diff --git a/kernel/power/console.c b/kernel/power/console.c index 5ed9e1be1560..33ace63b1088 100644 --- a/kernel/power/console.c +++ b/kernel/power/console.c @@ -58,7 +58,7 @@ int pm_vt_switch_required(struct device *dev, bool required) } } - entry = kmalloc_obj(*entry, GFP_KERNEL); + entry = kmalloc_obj(*entry); if (!entry) { ret = -ENOMEM; goto out; diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 43ddfc11b84a..e610cf8e9a06 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -439,7 +439,7 @@ static int em_create_pd(struct device *dev, int nr_states, cpumask_copy(em_span_cpus(pd), cpus); } else { - pd = kzalloc_obj(*pd, GFP_KERNEL); + pd = kzalloc_obj(*pd); if (!pd) return -ENOMEM; } diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 750b80f45b9f..398b994b73aa 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -341,7 +341,7 @@ static int cpu_latency_qos_open(struct inode *inode, struct file *filp) { struct pm_qos_request *req; - req = kzalloc_obj(*req, GFP_KERNEL); + req = kzalloc_obj(*req); if (!req) return -ENOMEM; @@ -440,7 +440,7 @@ static int cpu_wakeup_latency_qos_open(struct inode *inode, struct file *filp) { struct pm_qos_request *req; - req = kzalloc_obj(*req, GFP_KERNEL); + req = kzalloc_obj(*req); if (!req) return -ENOMEM; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index be0b3304339f..6e1321837c66 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1124,7 +1124,7 @@ int create_basic_memory_bitmaps(void) else BUG_ON(forbidden_pages_map || free_pages_map); - bm1 = kzalloc_obj(struct memory_bitmap, GFP_KERNEL); + bm1 = kzalloc_obj(struct memory_bitmap); if (!bm1) return -ENOMEM; @@ -1132,7 +1132,7 @@ int create_basic_memory_bitmaps(void) if (error) goto Free_first_object; - bm2 = kzalloc_obj(struct memory_bitmap, GFP_KERNEL); + bm2 = kzalloc_obj(struct memory_bitmap); if (!bm2) goto Free_first_bitmap; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 9bc1241259d3..2e64869bb5a0 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -155,7 +155,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) } } /* Add the new node and rebalance the tree. 
*/ - ext = kzalloc_obj(struct swsusp_extent, GFP_KERNEL); + ext = kzalloc_obj(struct swsusp_extent); if (!ext) return -ENOMEM; @@ -577,7 +577,7 @@ static struct crc_data *alloc_crc_data(int nr_threads) { struct crc_data *crc; - crc = kzalloc_obj(*crc, GFP_KERNEL); + crc = kzalloc_obj(*crc); if (!crc) return NULL; @@ -585,7 +585,7 @@ static struct crc_data *alloc_crc_data(int nr_threads) if (!crc->unc) goto err_free_crc; - crc->unc_len = kzalloc_objs(*crc->unc_len, nr_threads, GFP_KERNEL); + crc->unc_len = kzalloc_objs(*crc->unc_len, nr_threads); if (!crc->unc_len) goto err_free_unc; @@ -1016,7 +1016,7 @@ static int get_swap_reader(struct swap_map_handle *handle, last = handle->maps = NULL; offset = swsusp_header->image; while (offset) { - tmp = kzalloc_obj(*handle->maps, GFP_KERNEL); + tmp = kzalloc_obj(*handle->maps); if (!tmp) { release_swap_reader(handle); return -ENOMEM; diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 49712d9e7cfa..fd763da06a87 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -178,7 +178,7 @@ static struct wakelock *wakelock_lookup_add(const char *name, size_t len, return ERR_PTR(-ENOSPC); /* Not found, we have to add a new one. */ - wl = kzalloc_obj(*wl, GFP_KERNEL); + wl = kzalloc_obj(*wl); if (!wl) return ERR_PTR(-ENOMEM); diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c index c98241238f2a..d7044a7a214b 100644 --- a/kernel/printk/nbcon.c +++ b/kernel/printk/nbcon.c @@ -1801,7 +1801,7 @@ bool nbcon_alloc(struct console *con) */ con->pbufs = &printk_shared_pbufs; } else { - con->pbufs = kmalloc_obj(*con->pbufs, GFP_KERNEL); + con->pbufs = kmalloc_obj(*con->pbufs); if (!con->pbufs) { con_printk(KERN_ERR, con, "failed to allocate printing buffer\n"); return false; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 599d56300ded..0323149548f6 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -933,7 +933,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) return err; } - user = kvmalloc_obj(struct devkmsg_user, GFP_KERNEL); + user = kvmalloc_obj(struct devkmsg_user); if (!user) return -ENOMEM; diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 5512686be5d0..6c1acf9ba69b 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -1130,7 +1130,7 @@ rcu_scale_init(void) goto unwind; schedule_timeout_uninterruptible(1); } - reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders, GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders); if (reader_tasks == NULL) { SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; @@ -1144,11 +1144,11 @@ rcu_scale_init(void) } while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders) schedule_timeout_uninterruptible(1); - writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters, GFP_KERNEL); + writer_tasks = kzalloc_objs(writer_tasks[0], nrealwriters); writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL); writer_n_durations = kzalloc_objs(*writer_n_durations, nrealwriters, GFP_KERNEL); - writer_done = kzalloc_objs(writer_done[0], nrealwriters, GFP_KERNEL); + writer_done = kzalloc_objs(writer_done[0], nrealwriters); if (gp_async) { if (gp_async_max <= 0) { pr_warn("%s: gp_async_max = %d must be greater than zero.\n", diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index d2e673771295..197cea4d1f26 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1626,7 +1626,7 @@ rcu_torture_writer(void *arg) ulo_size = cur_ops->poll_active; } if 
(cur_ops->poll_active_full > 0) { - rgo = kzalloc_objs(*rgo, cur_ops->poll_active_full, GFP_KERNEL); + rgo = kzalloc_objs(*rgo, cur_ops->poll_active_full); if (!WARN_ON(!rgo)) rgo_size = cur_ops->poll_active_full; } @@ -2558,7 +2558,7 @@ static int rcu_torture_updown_init(void) VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives"); return 0; } - updownreaders = kzalloc_objs(*updownreaders, n_up_down, GFP_KERNEL); + updownreaders = kzalloc_objs(*updownreaders, n_up_down); if (!updownreaders) { VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests"); return -ENOMEM; @@ -2891,7 +2891,7 @@ static void rcu_torture_mem_dump_obj(void) mem_dump_obj(&z); kmem_cache_free(kcp, rhp); kmem_cache_destroy(kcp); - rhp = kmalloc_obj(*rhp, GFP_KERNEL); + rhp = kmalloc_obj(*rhp); if (WARN_ON_ONCE(!rhp)) return; pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); @@ -3399,7 +3399,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) n_launders++; n_launders_sa++; } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { - rfcp = kmalloc_obj(*rfcp, GFP_KERNEL); + rfcp = kmalloc_obj(*rfcp); if (WARN_ON_ONCE(!rfcp)) { schedule_timeout_interruptible(1); continue; @@ -3587,8 +3587,8 @@ static int __init rcu_torture_fwd_prog_init(void) fwd_progress_holdoff = 1; if (fwd_progress_div <= 0) fwd_progress_div = 4; - rfp = kzalloc_objs(*rfp, fwd_progress, GFP_KERNEL); - fwd_prog_tasks = kzalloc_objs(*fwd_prog_tasks, fwd_progress, GFP_KERNEL); + rfp = kzalloc_objs(*rfp, fwd_progress); + fwd_prog_tasks = kzalloc_objs(*fwd_prog_tasks, fwd_progress); if (!rfp || !fwd_prog_tasks) { kfree(rfp); kfree(fwd_prog_tasks); @@ -3754,9 +3754,9 @@ static int rcu_torture_barrier_init(void) atomic_set(&barrier_cbs_count, 0); atomic_set(&barrier_cbs_invoked, 0); barrier_cbs_tasks = - kzalloc_objs(barrier_cbs_tasks[0], n_barrier_cbs, GFP_KERNEL); + kzalloc_objs(barrier_cbs_tasks[0], n_barrier_cbs); barrier_cbs_wq = - kzalloc_objs(barrier_cbs_wq[0], n_barrier_cbs, GFP_KERNEL); + kzalloc_objs(barrier_cbs_wq[0], n_barrier_cbs); if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) return -ENOMEM; for (i = 0; i < n_barrier_cbs; i++) { @@ -4223,7 +4223,7 @@ static void rcu_test_debug_objects(void) (!cur_ops->call || !cur_ops->cb_barrier))) return; - struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_KERNEL); + struct rcu_head *rhp = kmalloc_obj(*rhp); init_rcu_head_on_stack(&rh1); init_rcu_head_on_stack(&rh2); @@ -4562,7 +4562,7 @@ rcu_torture_init(void) if (torture_init_error(firsterr)) goto unwind; } - reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders, GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders); rcu_torture_reader_mbchk = kzalloc_objs(*rcu_torture_reader_mbchk, nrealreaders, GFP_KERNEL); if (!reader_tasks || !rcu_torture_reader_mbchk) { diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 39d679a4c17e..c158b6a947cd 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -1143,7 +1143,7 @@ static bool typesafe_init(void) else if (si == 0) si = nr_cpu_ids; rtsarray_size = si; - rtsarray = kzalloc_objs(*rtsarray, si, GFP_KERNEL); + rtsarray = kzalloc_objs(*rtsarray, si); if (!rtsarray) return false; for (idx = 0; idx < rtsarray_size; idx++) { @@ -1575,7 +1575,7 @@ ref_scale_init(void) "%s: nreaders * loops will overflow, adjusted loops to %d", __func__, INT_MAX / nreaders)) loops = INT_MAX / nreaders; - reader_tasks = 
kzalloc_objs(reader_tasks[0], nreaders, GFP_KERNEL); + reader_tasks = kzalloc_objs(reader_tasks[0], nreaders); if (!reader_tasks) { SCALEOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 0faf35f393a3..aef8e91ad33e 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -238,7 +238,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags) static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) { if (!is_static) - ssp->srcu_sup = kzalloc_obj(*ssp->srcu_sup, GFP_KERNEL); + ssp->srcu_sup = kzalloc_obj(*ssp->srcu_sup); if (!ssp->srcu_sup) return -ENOMEM; if (!is_static) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 14150f09fd61..d98a5c38e19c 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -614,7 +614,7 @@ static void early_boot_test_call_rcu(void) call_rcu(&head, test_callback); early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu); call_srcu(&early_srcu, &shead, test_callback); - rhp = kmalloc_obj(*rhp, GFP_KERNEL); + rhp = kmalloc_obj(*rhp); if (!WARN_ON_ONCE(!rhp)) kfree_rcu(rhp, rh); } diff --git a/kernel/relay.c b/kernel/relay.c index c28fc5dd3ded..62b059ff2759 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -59,7 +59,7 @@ static const struct vm_operations_struct relay_file_mmap_ops = { */ static struct page **relay_alloc_page_array(unsigned int n_pages) { - return kvzalloc_objs(struct page *, n_pages, GFP_KERNEL); + return kvzalloc_objs(struct page *, n_pages); } /* @@ -150,10 +150,10 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t)) return NULL; - buf = kzalloc_obj(struct rchan_buf, GFP_KERNEL); + buf = kzalloc_obj(struct rchan_buf); if (!buf) return NULL; - buf->padding = kmalloc_objs(size_t, chan->n_subbufs, GFP_KERNEL); + buf->padding = kmalloc_objs(size_t, chan->n_subbufs); if (!buf->padding) goto free_buf; @@ -489,7 +489,7 @@ struct rchan *relay_open(const char *base_filename, if (!cb || !cb->create_buf_file || !cb->remove_buf_file) return NULL; - chan = kzalloc_obj(struct rchan, GFP_KERNEL); + chan = kzalloc_obj(struct rchan); if (!chan) return NULL; diff --git a/kernel/resource.c b/kernel/resource.c index d591e76c1535..bb966699da31 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -502,7 +502,7 @@ int walk_system_ram_res_rev(u64 start, u64 end, void *arg, int ret = -1; /* create a list */ - rams = kvzalloc_objs(struct resource, rams_size, GFP_KERNEL); + rams = kvzalloc_objs(struct resource, rams_size); if (!rams) return ret; diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c index 378218df2427..42785796f1db 100644 --- a/kernel/resource_kunit.c +++ b/kernel/resource_kunit.c @@ -204,7 +204,7 @@ static void resource_test_insert_resource(struct kunit *test, struct resource *p { struct resource *res; - res = kzalloc_obj(*res, GFP_KERNEL); + res = kzalloc_obj(*res); KUNIT_ASSERT_NOT_NULL(test, res); res->name = name; diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 02b3a5d2f0aa..327c315f411c 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -661,7 +661,7 @@ static int __init scf_torture_init(void) // Worker tasks invoking smp_call_function(). 
if (nthreads < 0) nthreads = num_online_cpus(); - scf_stats_p = kzalloc_objs(scf_stats_p[0], nthreads, GFP_KERNEL); + scf_stats_p = kzalloc_objs(scf_stats_p[0], nthreads); if (!scf_stats_p) { SCFTORTOUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index c5a1019cbe83..e380cf9372bb 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -86,7 +86,7 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p) static inline struct autogroup *autogroup_create(void) { - struct autogroup *ag = kzalloc_obj(*ag, GFP_KERNEL); + struct autogroup *ag = kzalloc_obj(*ag); struct task_group *tg; if (!ag) diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index 6065cf725eee..73b6b2426911 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -12,7 +12,7 @@ struct sched_core_cookie { static unsigned long sched_core_alloc_cookie(void) { - struct sched_core_cookie *ck = kmalloc_obj(*ck, GFP_KERNEL); + struct sched_core_cookie *ck = kmalloc_obj(*ck); if (!ck) return 0; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 6e9a2e067886..ca9d52cb1ebb 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -61,7 +61,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!parent_css) return &root_cpuacct.css; - ca = kzalloc_obj(*ca, GFP_KERNEL); + ca = kzalloc_obj(*ca); if (!ca) goto out; diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index bbb2d68df86a..0a2b7e30fd10 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -252,7 +252,7 @@ int cpudl_init(struct cpudl *cp) raw_spin_lock_init(&cp->lock); cp->size = 0; - cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids, GFP_KERNEL); + cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids); if (!cp->elements) return -ENOMEM; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d71d09ed1b3b..153232dd8276 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -638,7 +638,7 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) { struct sugov_policy *sg_policy; - sg_policy = kzalloc_obj(*sg_policy, GFP_KERNEL); + sg_policy = kzalloc_obj(*sg_policy); if (!sg_policy) return NULL; @@ -722,7 +722,7 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic { struct sugov_tunables *tunables; - tunables = kzalloc_obj(*tunables, GFP_KERNEL); + tunables = kzalloc_obj(*tunables); if (tunables) { gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook); if (!have_governor_per_policy()) diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index c2642deeaabc..8f2237e8b484 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -288,7 +288,7 @@ int cpupri_init(struct cpupri *cp) goto cleanup; } - cp->cpu_to_pri = kzalloc_objs(int, nr_cpu_ids, GFP_KERNEL); + cp->cpu_to_pri = kzalloc_objs(int, nr_cpu_ids); if (!cp->cpu_to_pri) goto cleanup; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b9fadb2583ea..5a812b510d5d 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4223,11 +4223,11 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len) { struct scx_exit_info *ei; - ei = kzalloc_obj(*ei, GFP_KERNEL); + ei = kzalloc_obj(*ei); if (!ei) return NULL; - ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN, GFP_KERNEL); + ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN); ei->msg = 
kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL); ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL); @@ -4824,7 +4824,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) struct scx_sched *sch; int node, ret; - sch = kzalloc_obj(*sch, GFP_KERNEL); + sch = kzalloc_obj(*sch); if (!sch) return ERR_PTR(-ENOMEM); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f6f050f2faec..eea99ec01a3f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3427,7 +3427,7 @@ retry_pids: if (!vma->numab_state) { struct vma_numab_state *ptr; - ptr = kzalloc_obj(*ptr, GFP_KERNEL); + ptr = kzalloc_obj(*ptr); if (!ptr) continue; @@ -13622,10 +13622,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) struct cfs_rq *cfs_rq; int i; - tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids, GFP_KERNEL); + tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids); if (!tg->cfs_rq) goto err; - tg->se = kzalloc_objs(se, nr_cpu_ids, GFP_KERNEL); + tg->se = kzalloc_objs(se, nr_cpu_ids); if (!tg->se) goto err; diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index bf8a70598a09..d9c9d9480a45 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1114,7 +1114,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup) if (!static_branch_likely(&psi_cgroups_enabled)) return 0; - cgroup->psi = kzalloc_obj(struct psi_group, GFP_KERNEL); + cgroup->psi = kzalloc_obj(struct psi_group); if (!cgroup->psi) return -ENOMEM; @@ -1340,7 +1340,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, if (threshold_us == 0 || threshold_us > window_us) return ERR_PTR(-EINVAL); - t = kmalloc_obj(*t, GFP_KERNEL); + t = kmalloc_obj(*t); if (!t) return ERR_PTR(-ENOMEM); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e72df7045592..f69e1f16d923 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -259,10 +259,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) if (!rt_group_sched_enabled()) return 1; - tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids, GFP_KERNEL); + tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids); if (!tg->rt_rq) goto err; - tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids, GFP_KERNEL); + tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids); if (!tg->rt_se) goto err; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index ac54fcae5de7..32dcddaead82 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -350,7 +350,7 @@ static struct perf_domain *pd_init(int cpu) return NULL; } - pd = kzalloc_obj(*pd, GFP_KERNEL); + pd = kzalloc_obj(*pd); if (!pd) return NULL; pd->em_pd = obj; @@ -589,7 +589,7 @@ static struct root_domain *alloc_rootdomain(void) { struct root_domain *rd; - rd = kzalloc_obj(*rd, GFP_KERNEL); + rd = kzalloc_obj(*rd); if (!rd) return NULL; @@ -1998,7 +1998,7 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), */ nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES); - distances = kzalloc_objs(int, nr_levels, GFP_KERNEL); + distances = kzalloc_objs(int, nr_levels); if (!distances) return -ENOMEM; @@ -2734,7 +2734,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms) int i; cpumask_var_t *doms; - doms = kmalloc_objs(*doms, ndoms, GFP_KERNEL); + doms = kmalloc_objs(*doms, ndoms); if (!doms) return NULL; for (i = 0; i < ndoms; i++) { diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b2297243071d..066909393c38 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -1893,7 +1893,7 @@ static struct file *init_listener(struct seccomp_filter *filter) struct file *ret; ret = 
ERR_PTR(-ENOMEM); - filter->notif = kzalloc_obj(*(filter->notif), GFP_KERNEL); + filter->notif = kzalloc_obj(*(filter->notif)); if (!filter->notif) goto out; diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index 864ae2da708f..2b6a0d99cdbe 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -255,7 +255,7 @@ static int __static_call_init(struct module *mod, goto do_transform; } - site_mod = kzalloc_obj(*site_mod, GFP_KERNEL); + site_mod = kzalloc_obj(*site_mod); if (!site_mod) return -ENOMEM; @@ -271,7 +271,7 @@ static int __static_call_init(struct module *mod, key->mods = site_mod; - site_mod = kzalloc_obj(*site_mod, GFP_KERNEL); + site_mod = kzalloc_obj(*site_mod); if (!site_mod) return -ENOMEM; } diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index 3a67e7e4c875..dab37295c8c2 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -103,7 +103,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) err = -ENODEV; goto out; } - pccontext = kzalloc_obj(*pccontext, GFP_KERNEL); + pccontext = kzalloc_obj(*pccontext); if (!pccontext) { err = -ENOMEM; goto out; diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index 21e72318aeb8..a8421f3025cd 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1766,7 +1766,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node, int i, top = 0, err = 0, start_lvl = 0; bool root_mismatch = false; - stack = kzalloc_objs(*stack, tmigr_hierarchy_levels, GFP_KERNEL); + stack = kzalloc_objs(*stack, tmigr_hierarchy_levels); if (!stack) return -ENOMEM; diff --git a/kernel/torture.c b/kernel/torture.c index 27c9bb6122d8..ec3370986976 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -494,7 +494,7 @@ void torture_shuffle_task_register(struct task_struct *tp) if (WARN_ON_ONCE(tp == NULL)) return; - stp = kmalloc_obj(*stp, GFP_KERNEL); + stp = kmalloc_obj(*stp); if (WARN_ON_ONCE(stp == NULL)) return; stp->st_t = tp; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 5526b141b433..30259dcaa838 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -671,7 +671,7 @@ static struct blk_trace *blk_trace_setup_prepare(struct request_queue *q, return ERR_PTR(-EBUSY); } - bt = kzalloc_obj(*bt, GFP_KERNEL); + bt = kzalloc_obj(*bt); if (!bt) return ERR_PTR(-ENOMEM); @@ -1904,7 +1904,7 @@ static int blk_trace_setup_queue(struct request_queue *q, struct blk_trace *bt = NULL; int ret = -ENOMEM; - bt = kzalloc_obj(*bt, GFP_KERNEL); + bt = kzalloc_obj(*bt); if (!bt) return -ENOMEM; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index c09268c6e9b7..9bc0dfd235af 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2243,7 +2243,7 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op, switch (op) { case MODULE_STATE_COMING: - btm = kzalloc_obj(*btm, GFP_KERNEL); + btm = kzalloc_obj(*btm); if (btm) { btm->module = module; list_add(&btm->list, &bpf_trace_modules); @@ -2819,7 +2819,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr goto error; } - link = kzalloc_obj(*link, GFP_KERNEL); + link = kzalloc_obj(*link); if (!link) { err = -ENOMEM; goto error; @@ -3238,8 +3238,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr err = -ENOMEM; - link = kzalloc_obj(*link, GFP_KERNEL); - uprobes = kvzalloc_objs(*uprobes, cnt, GFP_KERNEL); + link = kzalloc_obj(*link); + 
uprobes = kvzalloc_objs(*uprobes, cnt); if (!uprobes || !link) goto error_free; diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 0d649ca71ce0..ec90ba215405 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -805,7 +805,7 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter if (!addrs) return -ENOMEM; - mods = kzalloc_objs(*mods, num, GFP_KERNEL); + mods = kzalloc_objs(*mods, num); if (!mods) return -ENOMEM; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fb3915a67013..827fb9a0bf0d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -702,7 +702,7 @@ static int ftrace_profile_init_cpu(int cpu) */ size = FTRACE_PROFILE_HASH_SIZE; - stat->hash = kzalloc_objs(struct hlist_head, size, GFP_KERNEL); + stat->hash = kzalloc_objs(struct hlist_head, size); if (!stat->hash) return -ENOMEM; @@ -1215,7 +1215,7 @@ add_ftrace_hash_entry_direct(struct ftrace_hash *hash, unsigned long ip, unsigne { struct ftrace_func_entry *entry; - entry = kmalloc_obj(*entry, GFP_KERNEL); + entry = kmalloc_obj(*entry); if (!entry) return NULL; @@ -1335,12 +1335,12 @@ struct ftrace_hash *alloc_ftrace_hash(int size_bits) struct ftrace_hash *hash; int size; - hash = kzalloc_obj(*hash, GFP_KERNEL); + hash = kzalloc_obj(*hash); if (!hash) return NULL; size = 1 << size_bits; - hash->buckets = kzalloc_objs(*hash->buckets, size, GFP_KERNEL); + hash->buckets = kzalloc_objs(*hash->buckets, size); if (!hash->buckets) { kfree(hash); @@ -1360,7 +1360,7 @@ static int ftrace_add_mod(struct trace_array *tr, struct ftrace_mod_load *ftrace_mod; struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; - ftrace_mod = kzalloc_obj(*ftrace_mod, GFP_KERNEL); + ftrace_mod = kzalloc_obj(*ftrace_mod); if (!ftrace_mod) return -ENOMEM; @@ -3911,7 +3911,7 @@ ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages) if (!num_to_init) return NULL; - start_pg = pg = kzalloc_obj(*pg, GFP_KERNEL); + start_pg = pg = kzalloc_obj(*pg); if (!pg) return NULL; @@ -3929,7 +3929,7 @@ ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages) if (!num_to_init) break; - pg->next = kzalloc_obj(*pg, GFP_KERNEL); + pg->next = kzalloc_obj(*pg); if (!pg->next) goto free_pages; @@ -4686,7 +4686,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, if (tracing_check_open_get_tr(tr)) return -ENODEV; - iter = kzalloc_obj(*iter, GFP_KERNEL); + iter = kzalloc_obj(*iter); if (!iter) goto out; @@ -5334,7 +5334,7 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, if (entry) return -EBUSY; - map = kmalloc_obj(*map, GFP_KERNEL); + map = kmalloc_obj(*map); if (!map) return -ENOMEM; @@ -5474,7 +5474,7 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr, } } if (!probe) { - probe = kzalloc_obj(*probe, GFP_KERNEL); + probe = kzalloc_obj(*probe); if (!probe) { mutex_unlock(&ftrace_lock); return -ENOMEM; @@ -7223,7 +7223,7 @@ ftrace_graph_open(struct inode *inode, struct file *file) if (unlikely(ftrace_disabled)) return -ENODEV; - fgd = kmalloc_obj(*fgd, GFP_KERNEL); + fgd = kmalloc_obj(*fgd); if (fgd == NULL) return -ENOMEM; @@ -7251,7 +7251,7 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file) if (unlikely(ftrace_disabled)) return -ENODEV; - fgd = kmalloc_obj(*fgd, GFP_KERNEL); + fgd = kmalloc_obj(*fgd); if (fgd == NULL) return -ENOMEM; @@ -8041,7 +8041,7 @@ static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, if (!ret) return; - mod_func = kmalloc_obj(*mod_func, GFP_KERNEL); + 
mod_func = kmalloc_obj(*mod_func); if (!mod_func) return; @@ -8068,7 +8068,7 @@ allocate_ftrace_mod_map(struct module *mod, if (ftrace_disabled) return NULL; - mod_map = kmalloc_obj(*mod_map, GFP_KERNEL); + mod_map = kmalloc_obj(*mod_map); if (!mod_map) return NULL; @@ -8241,7 +8241,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list, { struct ftrace_init_func *func; - func = kmalloc_obj(*func, GFP_KERNEL); + func = kmalloc_obj(*func); if (!func) { MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); return; diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c index 6d12855b0277..30e3fdae6a17 100644 --- a/kernel/trace/pid_list.c +++ b/kernel/trace/pid_list.c @@ -423,7 +423,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) /* According to linux/thread.h, pids can be no bigger that 30 bits */ WARN_ON_ONCE(init_pid_ns.pid_max > (1 << 30)); - pid_list = kzalloc_obj(*pid_list, GFP_KERNEL); + pid_list = kzalloc_obj(*pid_list); if (!pid_list) return NULL; @@ -435,7 +435,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) for (i = 0; i < CHUNK_ALLOC; i++) { union upper_chunk *chunk; - chunk = kzalloc_obj(*chunk, GFP_KERNEL); + chunk = kzalloc_obj(*chunk); if (!chunk) break; chunk->next = pid_list->upper_list; @@ -446,7 +446,7 @@ struct trace_pid_list *trace_pid_list_alloc(void) for (i = 0; i < CHUNK_ALLOC; i++) { union lower_chunk *chunk; - chunk = kzalloc_obj(*chunk, GFP_KERNEL); + chunk = kzalloc_obj(*chunk); if (!chunk) break; chunk->next = pid_list->lower_list; diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c index d09d5a204627..5a8bdf88999a 100644 --- a/kernel/trace/rethook.c +++ b/kernel/trace/rethook.c @@ -108,7 +108,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler, if (!handler || num <= 0 || size < sizeof(struct rethook_node)) return ERR_PTR(-EINVAL); - rh = kzalloc_obj(struct rethook, GFP_KERNEL); + rh = kzalloc_obj(struct rethook); if (!rh) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e1395834886e..f16f053ef77d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -6509,7 +6509,7 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) if (!cpumask_test_cpu(cpu, buffer->cpumask)) return ERR_PTR(-ENODEV); - bpage = kzalloc_obj(*bpage, GFP_KERNEL); + bpage = kzalloc_obj(*bpage); if (!bpage) return ERR_PTR(-ENOMEM); @@ -7190,7 +7190,7 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, nr_pages = nr_vma_pages; - pages = kzalloc_objs(*pages, nr_pages, GFP_KERNEL); + pages = kzalloc_objs(*pages, nr_pages); if (!pages) return -ENOMEM; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 83ae2e8e931c..b44f5ae8958e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1064,7 +1064,7 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { struct cond_snapshot *cond_snapshot __free(kfree) = - kzalloc_obj(*cond_snapshot, GFP_KERNEL); + kzalloc_obj(*cond_snapshot); int ret; if (!cond_snapshot) @@ -5132,7 +5132,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. 
*/ - map_array = kmalloc_objs(*map_array, len + 2, GFP_KERNEL); + map_array = kmalloc_objs(*map_array, len + 2); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; @@ -5809,7 +5809,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) goto fail_pipe_on_cpu; /* create a buffer to store the information to pass to userspace */ - iter = kzalloc_obj(*iter, GFP_KERNEL); + iter = kzalloc_obj(*iter); if (!iter) { ret = -ENOMEM; goto fail_alloc_iter; @@ -6628,7 +6628,7 @@ static int user_buffer_init(struct trace_user_buf_info **tinfo, size_t size) if (!*tinfo) { alloc = true; - *tinfo = kzalloc_obj(**tinfo, GFP_KERNEL); + *tinfo = kzalloc_obj(**tinfo); if (!*tinfo) return -ENOMEM; } @@ -7153,10 +7153,10 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; - m = kzalloc_obj(*m, GFP_KERNEL); + m = kzalloc_obj(*m); if (!m) goto out; - iter = kzalloc_obj(*iter, GFP_KERNEL); + iter = kzalloc_obj(*iter); if (!iter) { kfree(m); goto out; @@ -7545,7 +7545,7 @@ static struct tracing_log_err *alloc_tracing_log_err(int len) { struct tracing_log_err *err; - err = kzalloc_obj(*err, GFP_KERNEL); + err = kzalloc_obj(*err); if (!err) return ERR_PTR(-ENOMEM); @@ -7804,7 +7804,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) if (ret) return ret; - info = kvzalloc_obj(*info, GFP_KERNEL); + info = kvzalloc_obj(*info); if (!info) { trace_array_put(tr); return -ENOMEM; @@ -8065,7 +8065,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct page *page; int r; - ref = kzalloc_obj(*ref, GFP_KERNEL); + ref = kzalloc_obj(*ref); if (!ref) { ret = -ENOMEM; break; @@ -8284,7 +8284,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, unsigned long long t; unsigned long usec_rem; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return -ENOMEM; @@ -8878,7 +8878,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer, for (cnt = 0; opts[cnt].name; cnt++) ; - topts = kzalloc_objs(*topts, cnt + 1, GFP_KERNEL); + topts = kzalloc_objs(*topts, cnt + 1); if (!topts) return 0; @@ -8950,7 +8950,7 @@ static int add_tracer(struct trace_array *tr, struct tracer *tracer) if (!trace_ok_for_array(tracer, tr)) return 0; - t = kmalloc_obj(*t, GFP_KERNEL); + t = kmalloc_obj(*t); if (!t) return -ENOMEM; @@ -8967,7 +8967,7 @@ static int add_tracer(struct trace_array *tr, struct tracer *tracer) * If the tracer defines default flags, it means the flags are * per trace instance. 
*/ - flags = kmalloc_obj(*flags, GFP_KERNEL); + flags = kmalloc_obj(*flags); if (!flags) return -ENOMEM; @@ -9538,7 +9538,7 @@ trace_array_create_systems(const char *name, const char *systems, int ret; ret = -ENOMEM; - tr = kzalloc_obj(*tr, GFP_KERNEL); + tr = kzalloc_obj(*tr); if (!tr) return ERR_PTR(ret); diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c index 1d3c42527736..00172f301f25 100644 --- a/kernel/trace/trace_btf.c +++ b/kernel/trace/trace_btf.c @@ -78,7 +78,7 @@ const struct btf_member *btf_find_struct_member(struct btf *btf, const char *name; int i, top = 0; - anon_stack = kzalloc_objs(*anon_stack, BTF_ANON_STACK_MAX, GFP_KERNEL); + anon_stack = kzalloc_objs(*anon_stack, BTF_ANON_STACK_MAX); if (!anon_stack) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index 3adc9a8c29a9..3eeaa5df7fc8 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -529,8 +529,8 @@ new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file) struct eprobe_data *edata; int ret; - edata = kzalloc_obj(*edata, GFP_KERNEL); - trigger = kzalloc_obj(*trigger, GFP_KERNEL); + edata = kzalloc_obj(*edata); + trigger = kzalloc_obj(*trigger); if (!trigger || !edata) { ret = -ENOMEM; goto error; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1d5ce0244f8c..9928da636c9d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -976,7 +976,7 @@ static int cache_mod(struct trace_array *tr, const char *mod, int set, if (!set) return remove_cache_mod(tr, mod, match, system, event); - event_mod = kzalloc_obj(*event_mod, GFP_KERNEL); + event_mod = kzalloc_obj(*event_mod); if (!event_mod) return -ENOMEM; @@ -1648,7 +1648,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) struct set_event_iter *iter; loff_t l; - iter = kzalloc_obj(*iter, GFP_KERNEL); + iter = kzalloc_obj(*iter); mutex_lock(&event_mutex); if (!iter) return NULL; @@ -2206,7 +2206,7 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, if (*ppos) return 0; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return -ENOMEM; @@ -2320,7 +2320,7 @@ static int system_tr_open(struct inode *inode, struct file *filp) int ret; /* Make a temporary dir that has no system but points to tr */ - dir = kzalloc_obj(*dir, GFP_KERNEL); + dir = kzalloc_obj(*dir); if (!dir) return -ENOMEM; @@ -2366,7 +2366,7 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, if (*ppos) return 0; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return -ENOMEM; @@ -2416,7 +2416,7 @@ show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t * if (*ppos) return 0; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return -ENOMEM; @@ -2440,7 +2440,7 @@ show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t if (*ppos) return 0; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return -ENOMEM; @@ -2881,7 +2881,7 @@ create_new_subsystem(const char *name) struct event_subsystem *system; /* need to create new entry */ - system = kmalloc_obj(*system, GFP_KERNEL); + system = kmalloc_obj(*system); if (!system) return NULL; @@ -2892,7 +2892,7 @@ create_new_subsystem(const char *name) if (!system->name) goto out_free; - system->filter = kzalloc_obj(struct event_filter, GFP_KERNEL); + system->filter = kzalloc_obj(struct event_filter); if (!system->filter) goto out_free; @@ -2960,7 +2960,7 @@ 
event_subsystem_dir(struct trace_array *tr, const char *name, } } - dir = kmalloc_obj(*dir, GFP_KERNEL); + dir = kmalloc_obj(*dir); if (!dir) goto out_fail; @@ -3403,7 +3403,7 @@ static void add_str_to_module(struct module *module, char *str) { struct module_string *modstr; - modstr = kmalloc_obj(*modstr, GFP_KERNEL); + modstr = kmalloc_obj(*modstr); /* * If we failed to allocate memory here, then we'll just @@ -4365,7 +4365,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, goto out_put; ret = -ENOMEM; - data = kzalloc_obj(*data, GFP_KERNEL); + data = kzalloc_obj(*data); if (!data) goto out_put; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index b84bdad362e9..609325f57942 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -485,10 +485,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, nr_preds += 2; /* For TRUE and FALSE */ - op_stack = kmalloc_objs(*op_stack, nr_parens, GFP_KERNEL); + op_stack = kmalloc_objs(*op_stack, nr_parens); if (!op_stack) return ERR_PTR(-ENOMEM); - prog_stack = kzalloc_objs(*prog_stack, nr_preds, GFP_KERNEL); + prog_stack = kzalloc_objs(*prog_stack, nr_preds); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; @@ -1213,7 +1213,7 @@ static void append_filter_err(struct trace_array *tr, if (WARN_ON(!filter->filter_string)) return; - s = kmalloc_obj(*s, GFP_KERNEL); + s = kmalloc_obj(*s); if (!s) return; trace_seq_init(s); @@ -1394,13 +1394,13 @@ static void try_delay_free_filter(struct event_filter *filter) struct filter_head *head; struct filter_list *item; - head = kmalloc_obj(*head, GFP_KERNEL); + head = kmalloc_obj(*head); if (!head) goto free_now; INIT_LIST_HEAD(&head->list); - item = kmalloc_obj(*item, GFP_KERNEL); + item = kmalloc_obj(*item); if (!item) { kfree(head); goto free_now; @@ -1442,7 +1442,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, struct filter_head *head; struct filter_list *item; - head = kmalloc_obj(*head, GFP_KERNEL); + head = kmalloc_obj(*head); if (!head) goto free_now; @@ -1451,7 +1451,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, list_for_each_entry(file, &tr->events, list) { if (file->system != dir) continue; - item = kmalloc_obj(*item, GFP_KERNEL); + item = kmalloc_obj(*item); if (!item) goto free_now; item->filter = event_filter(file); @@ -1459,7 +1459,7 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, event_clear_filter(file); } - item = kmalloc_obj(*item, GFP_KERNEL); + item = kmalloc_obj(*item); if (!item) goto free_now; @@ -1708,7 +1708,7 @@ static int parse_pred(const char *str, void *data, s = i; - pred = kzalloc_obj(*pred, GFP_KERNEL); + pred = kzalloc_obj(*pred); if (!pred) return -ENOMEM; @@ -1819,7 +1819,7 @@ static int parse_pred(const char *str, void *data, goto err_free; } - pred->regex = kzalloc_obj(*pred->regex, GFP_KERNEL); + pred->regex = kzalloc_obj(*pred->regex); if (!pred->regex) goto err_mem; pred->regex->len = len; @@ -1984,7 +1984,7 @@ static int parse_pred(const char *str, void *data, goto err_free; } - pred->regex = kzalloc_obj(*pred->regex, GFP_KERNEL); + pred->regex = kzalloc_obj(*pred->regex); if (!pred->regex) goto err_mem; pred->regex->len = len; @@ -2261,7 +2261,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, bool fail = true; int err; - filter_list = kmalloc_obj(*filter_list, GFP_KERNEL); + filter_list = kmalloc_obj(*filter_list); if (!filter_list) return 
-ENOMEM; @@ -2272,7 +2272,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, if (file->system != dir) continue; - filter = kzalloc_obj(*filter, GFP_KERNEL); + filter = kzalloc_obj(*filter); if (!filter) goto fail_mem; @@ -2289,7 +2289,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir, event_set_filtered_flag(file); - filter_item = kzalloc_obj(*filter_item, GFP_KERNEL); + filter_item = kzalloc_obj(*filter_item); if (!filter_item) goto fail_mem; @@ -2343,14 +2343,14 @@ static int create_filter_start(char *filter_string, bool set_str, if (WARN_ON_ONCE(*pse || *filterp)) return -EINVAL; - filter = kzalloc_obj(*filter, GFP_KERNEL); + filter = kzalloc_obj(*filter); if (filter && set_str) { filter->filter_string = kstrdup(filter_string, GFP_KERNEL); if (!filter->filter_string) err = -ENOMEM; } - pe = kzalloc_obj(*pe, GFP_KERNEL); + pe = kzalloc_obj(*pe); if (!filter || !pe || err) { kfree(pe); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index da42a087d646..a45cdd05123b 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -732,7 +732,7 @@ static struct track_data *track_data_alloc(unsigned int key_len, struct action_data *action_data, struct hist_trigger_data *hist_data) { - struct track_data *data = kzalloc_obj(*data, GFP_KERNEL); + struct track_data *data = kzalloc_obj(*data); struct hist_elt_data *elt_data; if (!data) @@ -748,7 +748,7 @@ static struct track_data *track_data_alloc(unsigned int key_len, data->action_data = action_data; data->hist_data = hist_data; - elt_data = kzalloc_obj(*elt_data, GFP_KERNEL); + elt_data = kzalloc_obj(*elt_data); if (!elt_data) { track_data_free(data); return ERR_PTR(-ENOMEM); @@ -1086,7 +1086,7 @@ static int save_hist_vars(struct hist_trigger_data *hist_data) if (tracing_check_open_get_tr(tr)) return -ENODEV; - var_data = kzalloc_obj(*var_data, GFP_KERNEL); + var_data = kzalloc_obj(*var_data); if (!var_data) { trace_array_put(tr); return -ENOMEM; @@ -1548,7 +1548,7 @@ parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) struct hist_trigger_attrs *attrs; int ret = 0; - attrs = kzalloc_obj(*attrs, GFP_KERNEL); + attrs = kzalloc_obj(*attrs); if (!attrs) return ERR_PTR(-ENOMEM); @@ -1646,7 +1646,7 @@ static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) struct hist_field *hist_field; unsigned int i, n_str; - elt_data = kzalloc_obj(*elt_data, GFP_KERNEL); + elt_data = kzalloc_obj(*elt_data); if (!elt_data) return -ENOMEM; @@ -1962,7 +1962,7 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (field && is_function_field(field)) return NULL; - hist_field = kzalloc_obj(struct hist_field, GFP_KERNEL); + hist_field = kzalloc_obj(struct hist_field); if (!hist_field) return NULL; @@ -3049,7 +3049,7 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data, if (!IS_ERR_OR_NULL(event_var)) return event_var; - var_hist = kzalloc_obj(*var_hist, GFP_KERNEL); + var_hist = kzalloc_obj(*var_hist); if (!var_hist) return ERR_PTR(-ENOMEM); @@ -3231,7 +3231,7 @@ static struct hist_field *create_var(struct hist_trigger_data *hist_data, goto out; } - var = kzalloc_obj(struct hist_field, GFP_KERNEL); + var = kzalloc_obj(struct hist_field); if (!var) { var = ERR_PTR(-ENOMEM); goto out; @@ -3292,7 +3292,7 @@ static struct field_var *create_field_var(struct hist_trigger_data *hist_data, goto err; } - field_var = kzalloc_obj(struct field_var, GFP_KERNEL); + field_var = kzalloc_obj(struct field_var); if 
(!field_var) { destroy_hist_field(val, 0); kfree_const(var->type); @@ -3831,7 +3831,7 @@ static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, int ret = -EINVAL; char *var_str; - data = kzalloc_obj(*data, GFP_KERNEL); + data = kzalloc_obj(*data); if (!data) return ERR_PTR(-ENOMEM); @@ -4198,7 +4198,7 @@ static struct action_data *onmatch_parse(struct trace_array *tr, char *str) struct action_data *data; int ret = -EINVAL; - data = kzalloc_obj(*data, GFP_KERNEL); + data = kzalloc_obj(*data); if (!data) return ERR_PTR(-ENOMEM); @@ -5136,7 +5136,7 @@ create_hist_data(unsigned int map_bits, struct hist_trigger_data *hist_data; int ret = 0; - hist_data = kzalloc_obj(*hist_data, GFP_KERNEL); + hist_data = kzalloc_obj(*hist_data); if (!hist_data) return ERR_PTR(-ENOMEM); @@ -5828,7 +5828,7 @@ static int event_hist_open(struct inode *inode, struct file *file) goto err; } - hist_file = kzalloc_obj(*hist_file, GFP_KERNEL); + hist_file = kzalloc_obj(*hist_file); if (!hist_file) { ret = -ENOMEM; goto err; @@ -6602,7 +6602,7 @@ static int hist_register_trigger(char *glob, data->private_data = named_data->private_data; set_named_trigger_data(data, named_data); /* Copy the command ops and update some of the functions */ - cmd_ops = kmalloc_obj(*cmd_ops, GFP_KERNEL); + cmd_ops = kmalloc_obj(*cmd_ops); if (!cmd_ops) { ret = -ENOMEM; goto out; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index db74b2c663f8..7303491e299d 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -711,7 +711,7 @@ static struct synth_field *parse_synth_field(int argc, char **argv, *field_version = check_field_version(prefix, field_type, field_name); - field = kzalloc_obj(*field, GFP_KERNEL); + field = kzalloc_obj(*field); if (!field) return ERR_PTR(-ENOMEM); @@ -819,7 +819,7 @@ static struct tracepoint *alloc_synth_tracepoint(char *name) { struct tracepoint *tp; - tp = kzalloc_obj(*tp, GFP_KERNEL); + tp = kzalloc_obj(*tp); if (!tp) return ERR_PTR(-ENOMEM); @@ -973,7 +973,7 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, unsigned int i, j, n_dynamic_fields = 0; struct synth_event *event; - event = kzalloc_obj(*event, GFP_KERNEL); + event = kzalloc_obj(*event); if (!event) { event = ERR_PTR(-ENOMEM); goto out; @@ -986,7 +986,7 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields, goto out; } - event->fields = kzalloc_objs(*event->fields, n_fields, GFP_KERNEL); + event->fields = kzalloc_objs(*event->fields, n_fields); if (!event->fields) { free_synth_event(event); event = ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 7ba3548a2f60..fecbd679d432 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -914,7 +914,7 @@ struct event_trigger_data *trigger_data_alloc(struct event_command *cmd_ops, { struct event_trigger_data *trigger_data; - trigger_data = kzalloc_obj(*trigger_data, GFP_KERNEL); + trigger_data = kzalloc_obj(*trigger_data); if (!trigger_data) return NULL; @@ -1724,7 +1724,7 @@ int event_enable_trigger_parse(struct event_command *cmd_ops, #endif ret = -ENOMEM; - enable_data = kzalloc_obj(*enable_data, GFP_KERNEL); + enable_data = kzalloc_obj(*enable_data); if (!enable_data) return ret; diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index c35182cb7286..c4ba484f7b38 100644 --- a/kernel/trace/trace_events_user.c +++ 
b/kernel/trace/trace_events_user.c @@ -370,7 +370,7 @@ static struct user_event_group *user_event_group_create(void) { struct user_event_group *group; - group = kzalloc_obj(*group, GFP_KERNEL); + group = kzalloc_obj(*group); if (!group) return NULL; diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index 7decd8383d67..8cd7eb790071 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -99,7 +99,7 @@ static struct tracepoint_user *__tracepoint_user_init(const char *name, struct t struct tracepoint_user *tuser __free(tuser_free) = NULL; int ret; - tuser = kzalloc_obj(*tuser, GFP_KERNEL); + tuser = kzalloc_obj(*tuser); if (!tuser) return NULL; tuser->name = kstrdup(name, GFP_KERNEL); @@ -1403,7 +1403,7 @@ static int trace_fprobe_create_cb(int argc, const char *argv[]) struct traceprobe_parse_context *ctx __free(traceprobe_parse_context) = NULL; int ret; - ctx = kzalloc_obj(*ctx, GFP_KERNEL); + ctx = kzalloc_obj(*ctx); if (!ctx) return -ENOMEM; diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index a7e4ad088acf..f283391a4dc8 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -61,7 +61,7 @@ int ftrace_allocate_ftrace_ops(struct trace_array *tr) if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return 0; - ops = kzalloc_obj(*ops, GFP_KERNEL); + ops = kzalloc_obj(*ops); if (!ops) return -ENOMEM; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 73f0479aeac0..3d8239fee004 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -434,7 +434,7 @@ int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops) { struct fgraph_ops *gops; - gops = kzalloc_obj(*gops, GFP_KERNEL); + gops = kzalloc_obj(*gops); if (!gops) return -ENOMEM; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 808b91873bd6..84539e1cd27e 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1082,7 +1082,7 @@ static int trace_kprobe_create_cb(int argc, const char *argv[]) struct traceprobe_parse_context *ctx __free(traceprobe_parse_context) = NULL; int ret; - ctx = kzalloc_obj(*ctx, GFP_KERNEL); + ctx = kzalloc_obj(*ctx); if (!ctx) return -ENOMEM; ctx->flags = TPARG_FL_KERNEL; diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 1c752a691317..226cf66e0d68 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c @@ -101,7 +101,7 @@ static void mmio_pipe_open(struct trace_iterator *iter) trace_seq_puts(s, "VERSION 20070824\n"); - hiter = kzalloc_obj(*hiter, GFP_KERNEL); + hiter = kzalloc_obj(*hiter); if (!hiter) return; diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 51e7b0476a7f..dee610e465b9 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -122,7 +122,7 @@ static int osnoise_register_instance(struct trace_array *tr) */ lockdep_assert_held(&trace_types_lock); - inst = kmalloc_obj(*inst, GFP_KERNEL); + inst = kmalloc_obj(*inst); if (!inst) return -ENOMEM; diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 05b61ec67622..5ea5e0d76f00 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -69,7 +69,7 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) } fmt = NULL; - tb_fmt = kmalloc_obj(*tb_fmt, GFP_KERNEL); + tb_fmt = kmalloc_obj(*tb_fmt); if (tb_fmt) { fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); if (fmt) { 
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index fff0879cb0e9..b3ce9bb0b971 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -838,7 +838,7 @@ static int __store_entry_arg(struct trace_probe *tp, int argnum) int i, offset, last_offset = 0; if (!earg) { - earg = kzalloc_obj(*tp->entry_arg, GFP_KERNEL); + earg = kzalloc_obj(*tp->entry_arg); if (!earg) return -ENOMEM; earg->size = 2 * tp->nr_args + 1; @@ -1499,7 +1499,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, if (IS_ERR(type)) return PTR_ERR(type); - code = tmp = kzalloc_objs(*code, FETCH_INSN_MAX, GFP_KERNEL); + code = tmp = kzalloc_objs(*code, FETCH_INSN_MAX); if (!code) return -ENOMEM; code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; @@ -1543,7 +1543,7 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, if (code->op == FETCH_OP_END) break; /* Shrink down the code buffer */ - parg->code = kzalloc_objs(*code, code - tmp + 1, GFP_KERNEL); + parg->code = kzalloc_objs(*code, code - tmp + 1); if (!parg->code) ret = -ENOMEM; else @@ -2149,7 +2149,7 @@ int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file) { struct event_file_link *link; - link = kmalloc_obj(*link, GFP_KERNEL); + link = kmalloc_obj(*link); if (!link) return -ENOMEM; diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c index 852069484060..784fe1fbb866 100644 --- a/kernel/trace/trace_recursion_record.c +++ b/kernel/trace/trace_recursion_record.c @@ -129,7 +129,7 @@ static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos) ret = &recursed_functions[*pos]; } - tseq = kzalloc_obj(*tseq, GFP_KERNEL); + tseq = kzalloc_obj(*tseq); if (!tseq) return ERR_PTR(-ENOMEM); diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index ded84f1d8121..e9f0ff962660 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -444,7 +444,7 @@ int trace_alloc_tgid_map(void) return 0; tgid_map_max = init_pid_ns.pid_max; - map = kvzalloc_objs(*tgid_map, tgid_map_max + 1, GFP_KERNEL); + map = kvzalloc_objs(*tgid_map, tgid_map_max + 1); if (!map) return -ENOMEM; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 43ed16b3b160..929c84075315 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -248,7 +248,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt) goto out; /* Add a dynamic probe */ - dyn_ops = kzalloc_obj(*dyn_ops, GFP_KERNEL); + dyn_ops = kzalloc_obj(*dyn_ops); if (!dyn_ops) { printk("MEMORY ERROR "); goto out; diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 3fec69e8a6d4..856ece13b7dc 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -77,7 +77,7 @@ static int insert_stat(struct rb_root *root, void *stat, cmp_func_t cmp) struct rb_node **new = &(root->rb_node), *parent = NULL; struct stat_node *data; - data = kzalloc_obj(*data, GFP_KERNEL); + data = kzalloc_obj(*data); if (!data) return -ENOMEM; data->stat = stat; @@ -322,7 +322,7 @@ int register_stat_tracer(struct tracer_stat *trace) } /* Init the session */ - session = kzalloc_obj(*session, GFP_KERNEL); + session = kzalloc_obj(*session); if (!session) return -ENOMEM; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 2f495e46034f..6ecae3e6d10a 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -617,7 +617,7 @@ static int 
syscall_fault_buffer_enable(void) return 0; } - sbuf = kmalloc_obj(*sbuf, GFP_KERNEL); + sbuf = kmalloc_obj(*sbuf); if (!sbuf) return -ENOMEM; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 83c17b90daad..00ca63934763 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -699,7 +699,7 @@ static int __trace_uprobe_create(int argc, const char **argv) memset(&path, 0, sizeof(path)); tu->filename = no_free_ptr(filename); - ctx = kzalloc_obj(*ctx, GFP_KERNEL); + ctx = kzalloc_obj(*ctx); if (!ctx) return -ENOMEM; ctx->flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER; diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index ef28c6c52295..bf1a507695b6 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -324,7 +324,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, struct tracing_map_array *a; unsigned int i; - a = kzalloc_obj(*a, GFP_KERNEL); + a = kzalloc_obj(*a); if (!a) return NULL; @@ -405,7 +405,7 @@ static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) struct tracing_map_elt *elt; int err = 0; - elt = kzalloc_obj(*elt, GFP_KERNEL); + elt = kzalloc_obj(*elt); if (!elt) return ERR_PTR(-ENOMEM); @@ -417,19 +417,19 @@ static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) goto free; } - elt->fields = kzalloc_objs(*elt->fields, map->n_fields, GFP_KERNEL); + elt->fields = kzalloc_objs(*elt->fields, map->n_fields); if (!elt->fields) { err = -ENOMEM; goto free; } - elt->vars = kzalloc_objs(*elt->vars, map->n_vars, GFP_KERNEL); + elt->vars = kzalloc_objs(*elt->vars, map->n_vars); if (!elt->vars) { err = -ENOMEM; goto free; } - elt->var_set = kzalloc_objs(*elt->var_set, map->n_vars, GFP_KERNEL); + elt->var_set = kzalloc_objs(*elt->var_set, map->n_vars); if (!elt->var_set) { err = -ENOMEM; goto free; @@ -777,7 +777,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits, map_bits > TRACING_MAP_BITS_MAX) return ERR_PTR(-EINVAL); - map = kzalloc_obj(*map, GFP_KERNEL); + map = kzalloc_obj(*map); if (!map) return ERR_PTR(-ENOMEM); @@ -949,7 +949,7 @@ create_sort_entry(void *key, struct tracing_map_elt *elt) { struct tracing_map_sort_entry *sort_entry; - sort_entry = kzalloc_obj(*sort_entry, GFP_KERNEL); + sort_entry = kzalloc_obj(*sort_entry); if (!sort_entry) return NULL; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 8287a4ff3f18..df7ab773c7f3 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -614,7 +614,7 @@ static int tracepoint_module_coming(struct module *mod) if (trace_module_has_bad_taint(mod)) return 0; - tp_mod = kmalloc_obj(struct tp_module, GFP_KERNEL); + tp_mod = kmalloc_obj(struct tp_module); if (!tp_mod) return -ENOMEM; tp_mod->mod = mod; diff --git a/kernel/ucount.c b/kernel/ucount.c index d1f723805c6d..d6dc3e859f12 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -163,7 +163,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) if (ucounts) return ucounts; - new = kzalloc_obj(*new, GFP_KERNEL); + new = kzalloc_obj(*new); if (!new) return NULL; diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index bf84af48dce8..3f1ed7ef0582 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -132,7 +132,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), struct vhost_task *vtsk; struct task_struct *tsk; - vtsk = kzalloc_obj(*vtsk, GFP_KERNEL); + vtsk = kzalloc_obj(*vtsk); if (!vtsk) return ERR_PTR(-ENOMEM); init_completion(&vtsk->exited); 
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index d966b8c99052..765c152f6084 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -278,7 +278,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) pipe->nr_accounted = nr_pages; ret = -ENOMEM; - pages = kzalloc_objs(struct page *, nr_pages, GFP_KERNEL); + pages = kzalloc_objs(struct page *, nr_pages); if (!pages) goto error; @@ -692,7 +692,7 @@ int watch_queue_init(struct pipe_inode_info *pipe) { struct watch_queue *wqueue; - wqueue = kzalloc_obj(*wqueue, GFP_KERNEL); + wqueue = kzalloc_obj(*wqueue); if (!wqueue) return -ENOMEM; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ee3e81133f78..399b0375a66a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4714,7 +4714,7 @@ struct workqueue_attrs *alloc_workqueue_attrs_noprof(void) { struct workqueue_attrs *attrs; - attrs = kzalloc_obj(*attrs, GFP_KERNEL); + attrs = kzalloc_obj(*attrs); if (!attrs) goto fail; if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) @@ -7486,7 +7486,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; - wq->wq_dev = wq_dev = kzalloc_obj(*wq_dev, GFP_KERNEL); + wq->wq_dev = wq_dev = kzalloc_obj(*wq_dev); if (!wq_dev) return -ENOMEM; @@ -7879,9 +7879,9 @@ void __init workqueue_init_early(void) wq_power_efficient = true; /* initialize WQ_AFFN_SYSTEM pods */ - pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], 1, GFP_KERNEL); - pt->pod_node = kzalloc_objs(pt->pod_node[0], 1, GFP_KERNEL); - pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids, GFP_KERNEL); + pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], 1); + pt->pod_node = kzalloc_objs(pt->pod_node[0], 1); + pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids); BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); @@ -8063,7 +8063,7 @@ static void __init init_pod_type(struct wq_pod_type *pt, pt->nr_pods = 0; /* init @pt->cpu_pod[] according to @cpus_share_pod() */ - pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids, GFP_KERNEL); + pt->cpu_pod = kzalloc_objs(pt->cpu_pod[0], nr_cpu_ids); BUG_ON(!pt->cpu_pod); for_each_possible_cpu(cur) { @@ -8080,8 +8080,8 @@ static void __init init_pod_type(struct wq_pod_type *pt, } /* init the rest to match @pt->cpu_pod[] */ - pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], pt->nr_pods, GFP_KERNEL); - pt->pod_node = kzalloc_objs(pt->pod_node[0], pt->nr_pods, GFP_KERNEL); + pt->pod_cpus = kzalloc_objs(pt->pod_cpus[0], pt->nr_pods); + pt->pod_node = kzalloc_objs(pt->pod_node[0], pt->nr_pods); BUG_ON(!pt->pod_cpus || !pt->pod_node); for (pod = 0; pod < pt->nr_pods; pod++) |
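
Every hunk in this series applies the same mechanical change: the explicit GFP_KERNEL argument is dropped from the object-sized allocation helpers (kmalloc_obj(), kzalloc_obj(), kzalloc_objs(), kvzalloc_obj(), kvzalloc_objs(), ...), and the allocation size continues to be derived from the object or type named at the call site. The sketch below is only an illustration of wrappers with that shape, assuming GFP_KERNEL becomes the implied default; it is not the in-tree definition of these helpers, and struct example_ctx is a made-up type used purely for the call-site example.

/*
 * Illustrative sketch, not the in-tree definitions: stand-in helpers that
 * take their size from the object (or type) they are handed and imply
 * GFP_KERNEL, which is why the flags argument can be removed at every
 * call site in the diff above.
 */
#include <linux/slab.h>

#define kmalloc_obj(obj)	kmalloc(sizeof(obj), GFP_KERNEL)
#define kzalloc_obj(obj)	kzalloc(sizeof(obj), GFP_KERNEL)
#define kmalloc_objs(obj, n)	kmalloc_array((n), sizeof(obj), GFP_KERNEL)
#define kzalloc_objs(obj, n)	kcalloc((n), sizeof(obj), GFP_KERNEL)
#define kvzalloc_obj(obj)	kvzalloc(sizeof(obj), GFP_KERNEL)
#define kvzalloc_objs(obj, n)	kvcalloc((n), sizeof(obj), GFP_KERNEL)

/* Hypothetical type, only for showing the call-site shape used above. */
struct example_ctx {
	int id;
	void *data;
};

static struct example_ctx *example_alloc_ctx(void)
{
	struct example_ctx *ctx;

	ctx = kzalloc_obj(*ctx);	/* was: kzalloc_obj(*ctx, GFP_KERNEL) */
	if (!ctx)
		return NULL;
	ctx->id = -1;
	return ctx;
}

Under that assumption the conversion is purely textual at the call sites: sizes, zeroing behaviour, and error handling are unchanged, and every argument removed in the hunks above is a literal GFP_KERNEL, so only callers needing other flags would still want a flags-taking variant.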
