diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 16:37:42 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 17:09:51 -0800 |
| commit | bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43 (patch) | |
| tree | 01fdd9d27f1b272bef0127966e08eac44d134d0a /kernel/sched | |
| parent | e19e1b480ac73c3e62ffebbca1174f0f511f43e7 (diff) | |
Convert 'alloc_obj' family to use the new default GFP_KERNEL argument
This was done entirely with mindless brute force, using
git grep -l '\<k[vmz]*alloc_objs*(.*, GFP_KERNEL)' |
xargs sed -i 's/\(alloc_objs*(.*\), GFP_KERNEL)/\1)/'
to convert the new alloc_obj() users that had a simple GFP_KERNEL
argument to just drop that argument.
Note that due to the extreme simplicity of the scripting, any slightly
more complex cases spread over multiple lines would not be triggered:
they definitely exist, but this covers the vast bulk of the cases, and
the resulting diff is also then easier to check automatically.
For the same reason the 'flex' versions will be done as a separate
conversion.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/autogroup.c | 2 | ||||
| -rw-r--r-- | kernel/sched/core_sched.c | 2 | ||||
| -rw-r--r-- | kernel/sched/cpuacct.c | 2 | ||||
| -rw-r--r-- | kernel/sched/cpudeadline.c | 2 | ||||
| -rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 4 | ||||
| -rw-r--r-- | kernel/sched/cpupri.c | 2 | ||||
| -rw-r--r-- | kernel/sched/ext.c | 6 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 6 | ||||
| -rw-r--r-- | kernel/sched/psi.c | 4 | ||||
| -rw-r--r-- | kernel/sched/rt.c | 4 | ||||
| -rw-r--r-- | kernel/sched/topology.c | 8 |
11 files changed, 21 insertions, 21 deletions
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index c5a1019cbe83..e380cf9372bb 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -86,7 +86,7 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
 
 static inline struct autogroup *autogroup_create(void)
 {
-	struct autogroup *ag = kzalloc_obj(*ag, GFP_KERNEL);
+	struct autogroup *ag = kzalloc_obj(*ag);
 	struct task_group *tg;
 
 	if (!ag)
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 6065cf725eee..73b6b2426911 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -12,7 +12,7 @@ struct sched_core_cookie {
 
 static unsigned long sched_core_alloc_cookie(void)
 {
-	struct sched_core_cookie *ck = kmalloc_obj(*ck, GFP_KERNEL);
+	struct sched_core_cookie *ck = kmalloc_obj(*ck);
 
 	if (!ck)
 		return 0;
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 6e9a2e067886..ca9d52cb1ebb 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -61,7 +61,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (!parent_css)
 		return &root_cpuacct.css;
 
-	ca = kzalloc_obj(*ca, GFP_KERNEL);
+	ca = kzalloc_obj(*ca);
 	if (!ca)
 		goto out;
 
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index bbb2d68df86a..0a2b7e30fd10 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -252,7 +252,7 @@ int cpudl_init(struct cpudl *cp)
 	raw_spin_lock_init(&cp->lock);
 	cp->size = 0;
 
-	cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids, GFP_KERNEL);
+	cp->elements = kzalloc_objs(struct cpudl_item, nr_cpu_ids);
 	if (!cp->elements)
 		return -ENOMEM;
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d71d09ed1b3b..153232dd8276 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -638,7 +638,7 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy;
 
-	sg_policy = kzalloc_obj(*sg_policy, GFP_KERNEL);
+	sg_policy = kzalloc_obj(*sg_policy);
 	if (!sg_policy)
 		return NULL;
 
@@ -722,7 +722,7 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
 {
 	struct sugov_tunables *tunables;
 
-	tunables = kzalloc_obj(*tunables, GFP_KERNEL);
+	tunables = kzalloc_obj(*tunables);
 	if (tunables) {
 		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
 		if (!have_governor_per_policy())
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index c2642deeaabc..8f2237e8b484 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -288,7 +288,7 @@ int cpupri_init(struct cpupri *cp)
 			goto cleanup;
 	}
 
-	cp->cpu_to_pri = kzalloc_objs(int, nr_cpu_ids, GFP_KERNEL);
+	cp->cpu_to_pri = kzalloc_objs(int, nr_cpu_ids);
 	if (!cp->cpu_to_pri)
 		goto cleanup;
 
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b9fadb2583ea..5a812b510d5d 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4223,11 +4223,11 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
 {
 	struct scx_exit_info *ei;
 
-	ei = kzalloc_obj(*ei, GFP_KERNEL);
+	ei = kzalloc_obj(*ei);
 	if (!ei)
 		return NULL;
 
-	ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN, GFP_KERNEL);
+	ei->bt = kzalloc_objs(ei->bt[0], SCX_EXIT_BT_LEN);
 	ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
 	ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
 
@@ -4824,7 +4824,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 	struct scx_sched *sch;
 	int node, ret;
 
-	sch = kzalloc_obj(*sch, GFP_KERNEL);
+	sch = kzalloc_obj(*sch);
 	if (!sch)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f6f050f2faec..eea99ec01a3f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3427,7 +3427,7 @@ retry_pids:
 		if (!vma->numab_state) {
 			struct vma_numab_state *ptr;
 
-			ptr = kzalloc_obj(*ptr, GFP_KERNEL);
+			ptr = kzalloc_obj(*ptr);
 			if (!ptr)
 				continue;
 
@@ -13622,10 +13622,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	struct cfs_rq *cfs_rq;
 	int i;
 
-	tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids, GFP_KERNEL);
+	tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids);
 	if (!tg->cfs_rq)
 		goto err;
-	tg->se = kzalloc_objs(se, nr_cpu_ids, GFP_KERNEL);
+	tg->se = kzalloc_objs(se, nr_cpu_ids);
 	if (!tg->se)
 		goto err;
 
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index bf8a70598a09..d9c9d9480a45 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1114,7 +1114,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 	if (!static_branch_likely(&psi_cgroups_enabled))
 		return 0;
 
-	cgroup->psi = kzalloc_obj(struct psi_group, GFP_KERNEL);
+	cgroup->psi = kzalloc_obj(struct psi_group);
 	if (!cgroup->psi)
 		return -ENOMEM;
 
@@ -1340,7 +1340,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
 	if (threshold_us == 0 || threshold_us > window_us)
 		return ERR_PTR(-EINVAL);
 
-	t = kmalloc_obj(*t, GFP_KERNEL);
+	t = kmalloc_obj(*t);
 	if (!t)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e72df7045592..f69e1f16d923 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -259,10 +259,10 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	if (!rt_group_sched_enabled())
 		return 1;
 
-	tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids, GFP_KERNEL);
+	tg->rt_rq = kzalloc_objs(rt_rq, nr_cpu_ids);
 	if (!tg->rt_rq)
 		goto err;
-	tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids, GFP_KERNEL);
+	tg->rt_se = kzalloc_objs(rt_se, nr_cpu_ids);
 	if (!tg->rt_se)
 		goto err;
 
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ac54fcae5de7..32dcddaead82 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -350,7 +350,7 @@ static struct perf_domain *pd_init(int cpu)
 		return NULL;
 	}
 
-	pd = kzalloc_obj(*pd, GFP_KERNEL);
+	pd = kzalloc_obj(*pd);
 	if (!pd)
 		return NULL;
 	pd->em_pd = obj;
@@ -589,7 +589,7 @@ static struct root_domain *alloc_rootdomain(void)
 {
 	struct root_domain *rd;
 
-	rd = kzalloc_obj(*rd, GFP_KERNEL);
+	rd = kzalloc_obj(*rd);
 	if (!rd)
 		return NULL;
 
@@ -1998,7 +1998,7 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
 	 */
 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
 
-	distances = kzalloc_objs(int, nr_levels, GFP_KERNEL);
+	distances = kzalloc_objs(int, nr_levels);
 	if (!distances)
 		return -ENOMEM;
 
@@ -2734,7 +2734,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
 	int i;
 	cpumask_var_t *doms;
 
-	doms = kmalloc_objs(*doms, ndoms, GFP_KERNEL);
+	doms = kmalloc_objs(*doms, ndoms);
 	if (!doms)
 		return NULL;
 	for (i = 0; i < ndoms; i++) {
