| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 11:02:58 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-21 11:02:58 -0800 |
| commit | 8934827db5403eae57d4537114a9ff88b0a8460f (patch) | |
| tree | 5167aa7e16b786b9135e19d508b234054fa6e8ce /mm | |
| parent | c7decec2f2d2ab0366567f9e30c0e1418cece43f (diff) | |
| parent | 7a70c15bd1449f1eb30991772edce37b41e496fb (diff) | |
Merge tag 'kmalloc_obj-treewide-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kmalloc_obj conversion from Kees Cook:
"This does the tree-wide conversion to kmalloc_obj() and friends using
coccinelle, with a subsequent small manual cleanup of whitespace
alignment that coccinelle does not handle.
This uncovered a clang bug in __builtin_counted_by_ref(), so the
conversion is preceded by disabling that for current versions of
clang. The imminent clang 22.1 release has the fix.
I've done allmodconfig build tests for x86_64, arm64, i386, and arm. I
did defconfig builds for alpha, m68k, mips, parisc, powerpc, riscv,
s390, sparc, sh, arc, csky, xtensa, hexagon, and openrisc"
* tag 'kmalloc_obj-treewide-v7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
kmalloc_obj: Clean up after treewide replacements
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
compiler_types: Disable __builtin_counted_by_ref for Clang
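The __builtin_counted_by_ref() workaround from the last commit above lands in compiler_types.h rather than under mm/, so it is not part of the diff shown here. As a rough sketch only — the macro name, the config symbols, and the version cutoff below are assumptions for illustration, not the actual patch — gating the builtin off for pre-22.1 clang could look something like this:

```c
/*
 * Hypothetical sketch of the "compiler_types: Disable
 * __builtin_counted_by_ref for Clang" change; the real commit may use a
 * different macro name, mechanism, or version check.
 */
#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 220100
/* Clang releases before the fixed 22.1 have a bug in the builtin. */
#define __has_counted_by_ref()	0
#else
#define __has_counted_by_ref()	__has_builtin(__builtin_counted_by_ref)
#endif
```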
Diffstat (limited to 'mm')
44 files changed, 159 insertions, 168 deletions
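The mm/ hunks that follow are mechanical applications of a few patterns: kmalloc(sizeof(*p), gfp) becomes kmalloc_obj(*p, gfp) (with kzalloc()/kvzalloc() gaining matching _obj forms), kmalloc_array()/kcalloc()/kvcalloc() become kmalloc_objs()/kzalloc_objs()/kvzalloc_objs() with the element expression first and the count second, and struct_size()-based allocations become kmalloc_flex()/kzalloc_flex(). A minimal before/after sketch, using a made-up struct rather than any of the mm/ types touched below — the helper calling conventions mirror the hunks, everything else is illustrative only:

```c
#include <linux/slab.h>
#include <linux/types.h>

/* 'struct demo', 'struct demo_stats', and demo_create() are hypothetical. */
struct demo_stats {
	u64 hits;
	u64 misses;
};

struct demo {
	unsigned int nr;
	struct demo_stats *stats;	/* single object */
	u64 *scratch;			/* array of nr elements */
	u64 vals[] __counted_by(nr);	/* flexible array member */
};

static struct demo *demo_create(unsigned int nr, gfp_t gfp)
{
	struct demo *d;

	/* Before: d = kzalloc(struct_size(d, vals, nr), gfp); */
	d = kzalloc_flex(*d, vals, nr, gfp);
	if (!d)
		return NULL;
	d->nr = nr;

	/* Before: d->stats = kzalloc(sizeof(*d->stats), gfp); */
	d->stats = kzalloc_obj(*d->stats, gfp);
	if (!d->stats)
		goto free_demo;

	/* Before: d->scratch = kmalloc_array(nr, sizeof(*d->scratch), gfp); */
	d->scratch = kmalloc_objs(*d->scratch, nr, gfp);
	if (!d->scratch)
		goto free_stats;

	return d;

free_stats:
	kfree(d->stats);
free_demo:
	kfree(d);
	return NULL;
}
```

The "Before:" comments show the pre-conversion calls that the Coccinelle rules replace; the new forms drop the open-coded sizeof()/struct_size(), presumably so the allocation size is derived from the assigned pointer's type.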
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index e319bd5e8b75..7a18fa6c7272 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -689,7 +689,7 @@ static int cgwb_create(struct backing_dev_info *bdi, goto out_put; /* need to create a new one */ - wb = kmalloc(sizeof(*wb), gfp); + wb = kmalloc_obj(*wb, gfp); if (!wb) { ret = -ENOMEM; goto out_put; diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 8c7d7f8e8fbd..cb94f897169d 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -131,7 +131,7 @@ static int cma_alloc_mem(struct cma *cma, int count) struct cma_mem *mem; struct page *p; - mem = kzalloc(sizeof(*mem), GFP_KERNEL); + mem = kzalloc_obj(*mem, GFP_KERNEL); if (!mem) return -ENOMEM; diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c index 97acd3e5a6a5..ee76baaf843c 100644 --- a/mm/cma_sysfs.c +++ b/mm/cma_sysfs.c @@ -117,7 +117,7 @@ static int __init cma_sysfs_init(void) return -ENOMEM; for (i = 0; i < cma_area_count; i++) { - cma_kobj = kzalloc(sizeof(*cma_kobj), GFP_KERNEL); + cma_kobj = kzalloc_obj(*cma_kobj, GFP_KERNEL); if (!cma_kobj) { err = -ENOMEM; goto out; diff --git a/mm/damon/core.c b/mm/damon/core.c index 5e2724a4f285..2d73d7effa3b 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -273,7 +273,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, { struct damos_filter *filter; - filter = kmalloc(sizeof(*filter), GFP_KERNEL); + filter = kmalloc_obj(*filter, GFP_KERNEL); if (!filter) return NULL; filter->type = type; @@ -332,7 +332,7 @@ struct damos_quota_goal *damos_new_quota_goal( { struct damos_quota_goal *goal; - goal = kmalloc(sizeof(*goal), GFP_KERNEL); + goal = kmalloc_obj(*goal, GFP_KERNEL); if (!goal) return NULL; goal->metric = metric; @@ -385,7 +385,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, { struct damos *scheme; - scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); + scheme = kmalloc_obj(*scheme, GFP_KERNEL); if (!scheme) return NULL; scheme->pattern = *pattern; @@ -473,7 +473,7 @@ struct damon_target *damon_new_target(void) { struct damon_target *t; - t = kmalloc(sizeof(*t), GFP_KERNEL); + t = kmalloc_obj(*t, GFP_KERNEL); if (!t) return NULL; @@ -529,7 +529,7 @@ struct damon_ctx *damon_new_ctx(void) { struct damon_ctx *ctx; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc_obj(*ctx, GFP_KERNEL); if (!ctx) return NULL; @@ -1153,7 +1153,7 @@ static int damon_commit_target_regions(struct damon_target *dst, if (!i) return 0; - ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); + ranges = kmalloc_objs(*ranges, i, GFP_KERNEL | __GFP_NOWARN); if (!ranges) return -ENOMEM; i = 0; diff --git a/mm/damon/stat.c b/mm/damon/stat.c index bcf6c8ae9b90..06fc95861dd4 100644 --- a/mm/damon/stat.c +++ b/mm/damon/stat.c @@ -90,8 +90,8 @@ static int damon_stat_sort_regions(struct damon_ctx *c, damon_for_each_target(t, c) { /* there is only one target */ - region_pointers = kmalloc_array(damon_nr_regions(t), - sizeof(*region_pointers), GFP_KERNEL); + region_pointers = kmalloc_objs(*region_pointers, + damon_nr_regions(t), GFP_KERNEL); if (!region_pointers) return -ENOMEM; damon_for_each_region(r, t) { diff --git a/mm/damon/sysfs-common.c b/mm/damon/sysfs-common.c index ffaf285e241a..2149008135ef 100644 --- a/mm/damon/sysfs-common.c +++ b/mm/damon/sysfs-common.c @@ -19,8 +19,7 @@ struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc( unsigned long min, unsigned long max) { - struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range), - GFP_KERNEL); + struct damon_sysfs_ul_range *range = kmalloc_obj(*range, 
GFP_KERNEL); if (!range) return NULL; diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 2b05a6477188..ba700da545af 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -26,8 +26,8 @@ struct damon_sysfs_scheme_region { static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc( struct damon_region *region) { - struct damon_sysfs_scheme_region *sysfs_region = kmalloc( - sizeof(*sysfs_region), GFP_KERNEL); + struct damon_sysfs_scheme_region *sysfs_region = kmalloc_obj(*sysfs_region, + GFP_KERNEL); if (!sysfs_region) return NULL; @@ -138,8 +138,8 @@ struct damon_sysfs_scheme_regions { static struct damon_sysfs_scheme_regions * damon_sysfs_scheme_regions_alloc(void) { - struct damon_sysfs_scheme_regions *regions = kmalloc(sizeof(*regions), - GFP_KERNEL); + struct damon_sysfs_scheme_regions *regions = kmalloc_obj(*regions, + GFP_KERNEL); if (!regions) return NULL; @@ -210,7 +210,7 @@ struct damon_sysfs_stats { static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_stats, GFP_KERNEL); } static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -376,7 +376,7 @@ static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc( { struct damon_sysfs_scheme_filter *filter; - filter = kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL); + filter = kzalloc_obj(struct damon_sysfs_scheme_filter, GFP_KERNEL); if (filter) filter->handle_layer = layer; return filter; @@ -724,7 +724,7 @@ damon_sysfs_scheme_filters_alloc(enum damos_sysfs_filter_handle_layer layer) { struct damon_sysfs_scheme_filters *filters; - filters = kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL); + filters = kzalloc_obj(struct damon_sysfs_scheme_filters, GFP_KERNEL); if (filters) filters->handle_layer = layer; return filters; @@ -753,8 +753,8 @@ static int damon_sysfs_scheme_filters_add_dirs( if (!nr_filters) return 0; - filters_arr = kmalloc_array(nr_filters, sizeof(*filters_arr), - GFP_KERNEL | __GFP_NOWARN); + filters_arr = kmalloc_objs(*filters_arr, nr_filters, + GFP_KERNEL | __GFP_NOWARN); if (!filters_arr) return -ENOMEM; filters->filters_arr = filters_arr; @@ -851,8 +851,8 @@ static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc( enum damos_wmark_metric metric, unsigned long interval_us, unsigned long high, unsigned long mid, unsigned long low) { - struct damon_sysfs_watermarks *watermarks = kmalloc( - sizeof(*watermarks), GFP_KERNEL); + struct damon_sysfs_watermarks *watermarks = kmalloc_obj(*watermarks, + GFP_KERNEL); if (!watermarks) return NULL; @@ -1045,7 +1045,7 @@ struct damos_sysfs_quota_goal { static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void) { - return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL); + return kzalloc_obj(struct damos_sysfs_quota_goal, GFP_KERNEL); } struct damos_sysfs_qgoal_metric_name { @@ -1263,7 +1263,7 @@ struct damos_sysfs_quota_goals { static struct damos_sysfs_quota_goals *damos_sysfs_quota_goals_alloc(void) { - return kzalloc(sizeof(struct damos_sysfs_quota_goals), GFP_KERNEL); + return kzalloc_obj(struct damos_sysfs_quota_goals, GFP_KERNEL); } static void damos_sysfs_quota_goals_rm_dirs( @@ -1289,8 +1289,8 @@ static int damos_sysfs_quota_goals_add_dirs( if (!nr_goals) return 0; - goals_arr = kmalloc_array(nr_goals, sizeof(*goals_arr), - GFP_KERNEL | __GFP_NOWARN); + goals_arr = kmalloc_objs(*goals_arr, nr_goals, + GFP_KERNEL | 
__GFP_NOWARN); if (!goals_arr) return -ENOMEM; goals->goals_arr = goals_arr; @@ -1383,8 +1383,7 @@ struct damon_sysfs_weights { static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz, unsigned int nr_accesses, unsigned int age) { - struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights), - GFP_KERNEL); + struct damon_sysfs_weights *weights = kmalloc_obj(*weights, GFP_KERNEL); if (!weights) return NULL; @@ -1496,7 +1495,7 @@ struct damon_sysfs_quotas { static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_quotas, GFP_KERNEL); } static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas) @@ -1660,8 +1659,8 @@ struct damon_sysfs_access_pattern { static struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void) { - struct damon_sysfs_access_pattern *access_pattern = - kmalloc(sizeof(*access_pattern), GFP_KERNEL); + struct damon_sysfs_access_pattern *access_pattern = kmalloc_obj(*access_pattern, + GFP_KERNEL); if (!access_pattern) return NULL; @@ -1757,7 +1756,7 @@ struct damos_sysfs_dest { static struct damos_sysfs_dest *damos_sysfs_dest_alloc(void) { - return kzalloc(sizeof(struct damos_sysfs_dest), GFP_KERNEL); + return kzalloc_obj(struct damos_sysfs_dest, GFP_KERNEL); } static ssize_t id_show( @@ -1837,7 +1836,7 @@ struct damos_sysfs_dests { static struct damos_sysfs_dests * damos_sysfs_dests_alloc(void) { - return kzalloc(sizeof(struct damos_sysfs_dests), GFP_KERNEL); + return kzalloc_obj(struct damos_sysfs_dests, GFP_KERNEL); } static void damos_sysfs_dests_rm_dirs( @@ -1863,8 +1862,8 @@ static int damos_sysfs_dests_add_dirs( if (!nr_dests) return 0; - dests_arr = kmalloc_array(nr_dests, sizeof(*dests_arr), - GFP_KERNEL | __GFP_NOWARN); + dests_arr = kmalloc_objs(*dests_arr, nr_dests, + GFP_KERNEL | __GFP_NOWARN); if (!dests_arr) return -ENOMEM; dests->dests_arr = dests_arr; @@ -2014,8 +2013,7 @@ static struct damos_sysfs_action_name damos_sysfs_action_names[] = { static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc( enum damos_action action, unsigned long apply_interval_us) { - struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme), - GFP_KERNEL); + struct damon_sysfs_scheme *scheme = kmalloc_obj(*scheme, GFP_KERNEL); if (!scheme) return NULL; @@ -2376,7 +2374,7 @@ static const struct kobj_type damon_sysfs_scheme_ktype = { struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_schemes, GFP_KERNEL); } void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes) @@ -2403,8 +2401,8 @@ static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes, if (!nr_schemes) return 0; - schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr), - GFP_KERNEL | __GFP_NOWARN); + schemes_arr = kmalloc_objs(*schemes_arr, nr_schemes, + GFP_KERNEL | __GFP_NOWARN); if (!schemes_arr) return -ENOMEM; schemes->schemes_arr = schemes_arr; @@ -2683,12 +2681,12 @@ static int damos_sysfs_add_migrate_dest(struct damos *scheme, struct damos_migrate_dests *dests = &scheme->migrate_dests; int i; - dests->node_id_arr = kmalloc_array(sysfs_dests->nr, - sizeof(*dests->node_id_arr), GFP_KERNEL); + dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, sysfs_dests->nr, + GFP_KERNEL); if (!dests->node_id_arr) return -ENOMEM; - dests->weight_arr = kmalloc_array(sysfs_dests->nr, - sizeof(*dests->weight_arr), 
GFP_KERNEL); + dests->weight_arr = kmalloc_objs(*dests->weight_arr, sysfs_dests->nr, + GFP_KERNEL); if (!dests->weight_arr) /* ->node_id_arr will be freed by scheme destruction */ return -ENOMEM; diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index b7f66196bec4..9561ad8b7852 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -22,7 +22,7 @@ struct damon_sysfs_region { static struct damon_sysfs_region *damon_sysfs_region_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_region), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_region, GFP_KERNEL); } static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -99,7 +99,7 @@ struct damon_sysfs_regions { static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_regions, GFP_KERNEL); } static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions) @@ -124,8 +124,8 @@ static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions, if (!nr_regions) return 0; - regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr), - GFP_KERNEL | __GFP_NOWARN); + regions_arr = kmalloc_objs(*regions_arr, nr_regions, + GFP_KERNEL | __GFP_NOWARN); if (!regions_arr) return -ENOMEM; regions->regions_arr = regions_arr; @@ -217,7 +217,7 @@ struct damon_sysfs_target { static struct damon_sysfs_target *damon_sysfs_target_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_target, GFP_KERNEL); } static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target) @@ -323,7 +323,7 @@ struct damon_sysfs_targets { static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_targets, GFP_KERNEL); } static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets) @@ -350,8 +350,8 @@ static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets, if (!nr_targets) return 0; - targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr), - GFP_KERNEL | __GFP_NOWARN); + targets_arr = kmalloc_objs(*targets_arr, nr_targets, + GFP_KERNEL | __GFP_NOWARN); if (!targets_arr) return -ENOMEM; targets->targets_arr = targets_arr; @@ -452,8 +452,7 @@ static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc( unsigned long access_bp, unsigned long aggrs, unsigned long min_sample_us, unsigned long max_sample_us) { - struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal), - GFP_KERNEL); + struct damon_sysfs_intervals_goal *goal = kmalloc_obj(*goal, GFP_KERNEL); if (!goal) return NULL; @@ -610,8 +609,8 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( unsigned long sample_us, unsigned long aggr_us, unsigned long update_us) { - struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals), - GFP_KERNEL); + struct damon_sysfs_intervals *intervals = kmalloc_obj(*intervals, + GFP_KERNEL); if (!intervals) return NULL; @@ -761,7 +760,7 @@ struct damon_sysfs_attrs { static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void) { - struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL); + struct damon_sysfs_attrs *attrs = kmalloc_obj(*attrs, GFP_KERNEL); if (!attrs) return NULL; @@ -873,8 +872,7 @@ struct damon_sysfs_context { static struct damon_sysfs_context *damon_sysfs_context_alloc( enum damon_ops_id ops_id) { - struct damon_sysfs_context 
*context = kmalloc(sizeof(*context), - GFP_KERNEL); + struct damon_sysfs_context *context = kmalloc_obj(*context, GFP_KERNEL); if (!context) return NULL; @@ -1096,7 +1094,7 @@ struct damon_sysfs_contexts { static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_contexts, GFP_KERNEL); } static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts) @@ -1123,8 +1121,8 @@ static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts, if (!nr_contexts) return 0; - contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr), - GFP_KERNEL | __GFP_NOWARN); + contexts_arr = kmalloc_objs(*contexts_arr, nr_contexts, + GFP_KERNEL | __GFP_NOWARN); if (!contexts_arr) return -ENOMEM; contexts->contexts_arr = contexts_arr; @@ -1223,7 +1221,7 @@ struct damon_sysfs_kdamond { static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_kdamond, GFP_KERNEL); } static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond) @@ -1367,8 +1365,9 @@ static int damon_sysfs_set_regions(struct damon_target *t, struct damon_sysfs_regions *sysfs_regions, unsigned long min_region_sz) { - struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr, - sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); + struct damon_addr_range *ranges = kmalloc_objs(*ranges, + sysfs_regions->nr, + GFP_KERNEL | __GFP_NOWARN); int i, err = -EINVAL; if (!ranges) @@ -1643,8 +1642,7 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond) damon_destroy_ctx(kdamond->damon_ctx); kdamond->damon_ctx = NULL; - repeat_call_control = kmalloc(sizeof(*repeat_call_control), - GFP_KERNEL); + repeat_call_control = kmalloc_obj(*repeat_call_control, GFP_KERNEL); if (!repeat_call_control) return -ENOMEM; @@ -1897,7 +1895,7 @@ struct damon_sysfs_kdamonds { static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_kdamonds, GFP_KERNEL); } static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds) @@ -1940,8 +1938,8 @@ static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds, if (!nr_kdamonds) return 0; - kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr), - GFP_KERNEL | __GFP_NOWARN); + kdamonds_arr = kmalloc_objs(*kdamonds_arr, nr_kdamonds, + GFP_KERNEL | __GFP_NOWARN); if (!kdamonds_arr) return -ENOMEM; kdamonds->kdamonds_arr = kdamonds_arr; @@ -2038,7 +2036,7 @@ struct damon_sysfs_ui_dir { static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void) { - return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL); + return kzalloc_obj(struct damon_sysfs_ui_dir, GFP_KERNEL); } static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir) diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index 92ea25e2dc9e..d3a30b170564 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -725,12 +725,12 @@ static int damos_test_help_dests_setup(struct damos_migrate_dests *dests, { size_t i; - dests->node_id_arr = kmalloc_array(nr_dests, - sizeof(*dests->node_id_arr), GFP_KERNEL); + dests->node_id_arr = kmalloc_objs(*dests->node_id_arr, nr_dests, + GFP_KERNEL); if (!dests->node_id_arr) return -ENOMEM; - dests->weight_arr = 
kmalloc_array(nr_dests, - sizeof(*dests->weight_arr), GFP_KERNEL); + dests->weight_arr = kmalloc_objs(*dests->weight_arr, nr_dests, + GFP_KERNEL); if (!dests->weight_arr) { kfree(dests->node_id_arr); dests->node_id_arr = NULL; diff --git a/mm/damon/tests/sysfs-kunit.h b/mm/damon/tests/sysfs-kunit.h index 0c665ed255a3..8dcd4a01684e 100644 --- a/mm/damon/tests/sysfs-kunit.h +++ b/mm/damon/tests/sysfs-kunit.h @@ -48,8 +48,8 @@ static void damon_sysfs_test_add_targets(struct kunit *test) if (!sysfs_targets) kunit_skip(test, "sysfs_targets alloc fail"); sysfs_targets->nr = 1; - sysfs_targets->targets_arr = kmalloc_array(1, - sizeof(*sysfs_targets->targets_arr), GFP_KERNEL); + sysfs_targets->targets_arr = kmalloc_objs(*sysfs_targets->targets_arr, + 1, GFP_KERNEL); if (!sysfs_targets->targets_arr) { kfree(sysfs_targets); kunit_skip(test, "targets_arr alloc fail"); diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 83ab3d8c3792..862835d69af1 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -821,8 +821,8 @@ static unsigned long damos_va_migrate(struct damon_target *target, use_target_nid = dests->nr_dests == 0; nr_dests = use_target_nid ? 1 : dests->nr_dests; priv.scheme = s; - priv.migration_lists = kmalloc_array(nr_dests, - sizeof(*priv.migration_lists), GFP_KERNEL); + priv.migration_lists = kmalloc_objs(*priv.migration_lists, nr_dests, + GFP_KERNEL); if (!priv.migration_lists) return 0; diff --git a/mm/dmapool_test.c b/mm/dmapool_test.c index e8172d708308..454952ac9f0e 100644 --- a/mm/dmapool_test.c +++ b/mm/dmapool_test.c @@ -67,7 +67,7 @@ static int dmapool_test_block(const struct dmapool_parms *parms) struct dma_pool_pair *p; int i, ret; - p = kcalloc(blocks, sizeof(*p), GFP_KERNEL); + p = kzalloc_objs(*p, blocks, GFP_KERNEL); if (!p) return -ENOMEM; @@ -723,8 +723,8 @@ int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map, use_iova = dma_iova_try_alloc(dev, &map->state, 0, nr_entries * PAGE_SIZE); if (!use_iova && dma_need_unmap(dev)) { - map->dma_list = kvcalloc(nr_entries, sizeof(*map->dma_list), - GFP_KERNEL | __GFP_NOWARN); + map->dma_list = kvzalloc_objs(*map->dma_list, nr_entries, + GFP_KERNEL | __GFP_NOWARN); if (!map->dma_list) goto err_dma; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0d487649e4de..809c99ee81b9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -718,7 +718,7 @@ static struct thpsize *thpsize_create(int order, struct kobject *parent) struct thpsize *thpsize; int ret = -ENOMEM; - thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL); + thpsize = kzalloc_obj(*thpsize, GFP_KERNEL); if (!thpsize) goto err; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6e855a32de3d..6793a5b07882 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -154,7 +154,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, { struct hugepage_subpool *spool; - spool = kzalloc(sizeof(*spool), GFP_KERNEL); + spool = kzalloc_obj(*spool, GFP_KERNEL); if (!spool) return NULL; @@ -429,7 +429,7 @@ int hugetlb_vma_lock_alloc(struct vm_area_struct *vma) if (vma->vm_private_data) return -EINVAL; - vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); + vma_lock = kmalloc_obj(*vma_lock, GFP_KERNEL); if (!vma_lock) { /* * If we can not allocate structure, then vma can not @@ -687,7 +687,7 @@ static int allocate_file_region_entries(struct resv_map *resv, spin_unlock(&resv->lock); for (i = 0; i < to_allocate; i++) { - trg = kmalloc(sizeof(*trg), GFP_KERNEL); + trg = kmalloc_obj(*trg, GFP_KERNEL); if (!trg) goto out_of_memory; list_add(&trg->link, 
&allocated_regions); @@ -891,7 +891,7 @@ retry: if (!nrg) { spin_unlock(&resv->lock); - nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); + nrg = kmalloc_obj(*nrg, GFP_KERNEL); if (!nrg) return -ENOMEM; goto retry; @@ -1105,8 +1105,8 @@ resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, struct resv_map *resv_map_alloc(void) { - struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); - struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); + struct resv_map *resv_map = kmalloc_obj(*resv_map, GFP_KERNEL); + struct file_region *rg = kmalloc_obj(*rg, GFP_KERNEL); if (!resv_map || !rg) { kfree(resv_map); @@ -4190,8 +4190,7 @@ static int __init hugetlb_init(void) num_fault_mutexes = 1; #endif hugetlb_fault_mutex_table = - kmalloc_array(num_fault_mutexes, sizeof(struct mutex), - GFP_KERNEL); + kmalloc_objs(struct mutex, num_fault_mutexes, GFP_KERNEL); BUG_ON(!hugetlb_fault_mutex_table); for (i = 0; i < num_fault_mutexes; i++) diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index 792d06538fa9..6e4706d6ee82 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -139,8 +139,7 @@ hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) struct hugetlb_cgroup *h_cgroup; int node; - h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids), - GFP_KERNEL); + h_cgroup = kzalloc_flex(*h_cgroup, nodeinfo, nr_node_ids, GFP_KERNEL); if (!h_cgroup) return ERR_PTR(-ENOMEM); @@ -857,10 +856,10 @@ static void __init __hugetlb_cgroup_file_pre_init(void) int cft_count; cft_count = hugetlb_max_hstate * DFL_TMPL_SIZE + 1; /* add terminator */ - dfl_files = kcalloc(cft_count, sizeof(struct cftype), GFP_KERNEL); + dfl_files = kzalloc_objs(struct cftype, cft_count, GFP_KERNEL); BUG_ON(!dfl_files); cft_count = hugetlb_max_hstate * LEGACY_TMPL_SIZE + 1; /* add terminator */ - legacy_files = kcalloc(cft_count, sizeof(struct cftype), GFP_KERNEL); + legacy_files = kzalloc_objs(struct cftype, cft_count, GFP_KERNEL); BUG_ON(!legacy_files); } diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c index b4d157962121..cb9c95ed7821 100644 --- a/mm/kasan/kasan_test_c.c +++ b/mm/kasan/kasan_test_c.c @@ -511,7 +511,7 @@ static void kmalloc_oob_16(struct kunit *test) ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); - ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); + ptr2 = kmalloc_obj(*ptr2, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); OPTIMIZER_HIDE_VAR(ptr1); @@ -529,10 +529,10 @@ static void kmalloc_uaf_16(struct kunit *test) KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); - ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL); + ptr1 = kmalloc_obj(*ptr1, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); - ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); + ptr2 = kmalloc_obj(*ptr2, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); kfree(ptr2); @@ -859,7 +859,7 @@ static void kasan_atomics(struct kunit *test) */ a1 = kzalloc(48, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1); - a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL); + a2 = kzalloc_obj(atomic_long_t, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2); /* Use atomics to access the redzone. 
*/ @@ -954,7 +954,7 @@ static void rcu_uaf(struct kunit *test) { struct kasan_rcu_info *ptr; - ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL); + ptr = kmalloc_obj(struct kasan_rcu_info, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); global_rcu_ptr = rcu_dereference_protected( @@ -978,7 +978,7 @@ static void workqueue_uaf(struct kunit *test) workqueue = create_workqueue("kasan_workqueue_test"); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue); - work = kmalloc(sizeof(struct work_struct), GFP_KERNEL); + work = kmalloc_obj(struct work_struct, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work); INIT_WORK(work, workqueue_uaf_work); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index eff9e3061925..f2f95b32317c 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2769,7 +2769,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start, if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER)) return -EINVAL; - cc = kmalloc(sizeof(*cc), GFP_KERNEL); + cc = kmalloc_obj(*cc, GFP_KERNEL); if (!cc) return -ENOMEM; cc->is_khugepaged = false; diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c index 7a7fbaff7350..27cc936176ea 100644 --- a/mm/kmsan/kmsan_test.c +++ b/mm/kmsan/kmsan_test.c @@ -168,7 +168,7 @@ static void test_uninit_kmalloc(struct kunit *test) int *ptr; kunit_info(test, "uninitialized kmalloc test (UMR report)\n"); - ptr = kmalloc(sizeof(*ptr), GFP_KERNEL); + ptr = kmalloc_obj(*ptr, GFP_KERNEL); USE(*ptr); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } @@ -182,7 +182,7 @@ static void test_init_kmalloc(struct kunit *test) int *ptr; kunit_info(test, "initialized kmalloc test (no reports)\n"); - ptr = kmalloc(sizeof(*ptr), GFP_KERNEL); + ptr = kmalloc_obj(*ptr, GFP_KERNEL); memset(ptr, 0, sizeof(*ptr)); USE(*ptr); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); @@ -195,7 +195,7 @@ static void test_init_kzalloc(struct kunit *test) int *ptr; kunit_info(test, "initialized kzalloc test (no reports)\n"); - ptr = kzalloc(sizeof(*ptr), GFP_KERNEL); + ptr = kzalloc_obj(*ptr, GFP_KERNEL); USE(*ptr); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } @@ -322,7 +322,7 @@ static void test_init_kmsan_vmap_vunmap(struct kunit *test) kunit_info(test, "pages initialized via vmap (no reports)\n"); - pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); + pages = kmalloc_objs(*pages, npages, GFP_KERNEL); for (int i = 0; i < npages; i++) pages[i] = alloc_page(GFP_KERNEL); vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL); diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c index 9e1c5f2b7a41..8fde939784a7 100644 --- a/mm/kmsan/shadow.c +++ b/mm/kmsan/shadow.c @@ -230,8 +230,8 @@ int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end, return 0; nr = (end - start) / PAGE_SIZE; - s_pages = kcalloc(nr, sizeof(*s_pages), gfp_mask); - o_pages = kcalloc(nr, sizeof(*o_pages), gfp_mask); + s_pages = kzalloc_objs(*s_pages, nr, gfp_mask); + o_pages = kzalloc_objs(*o_pages, nr, gfp_mask); if (!s_pages || !o_pages) { err = -ENOMEM; goto ret; @@ -3586,8 +3586,8 @@ static ssize_t merge_across_nodes_store(struct kobject *kobj, * Allocate stable and unstable together: * MAXSMP NODES_SHIFT 10 will use 16kB. 
*/ - buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), - GFP_KERNEL); + buf = kzalloc_objs(*buf, nr_node_ids + nr_node_ids, + GFP_KERNEL); /* Let us assume that RB_ROOT is NULL is zero */ if (!buf) err = -ENOMEM; diff --git a/mm/list_lru.c b/mm/list_lru.c index 13b9f66d950e..16526b9d71b5 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -407,7 +407,7 @@ static struct list_lru_memcg *memcg_init_list_lru_one(struct list_lru *lru, gfp_ int nid; struct list_lru_memcg *mlru; - mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); + mlru = kmalloc_flex(*mlru, node, nr_node_ids, gfp); if (!mlru) return NULL; @@ -585,7 +585,7 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct shrinker *shr memcg_aware = false; #endif - lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); + lru->node = kzalloc_objs(*lru->node, nr_node_ids, GFP_KERNEL); if (!lru->node) return -ENOMEM; diff --git a/mm/madvise.c b/mm/madvise.c index 8debb2d434aa..30c7a642e7fc 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -91,7 +91,7 @@ struct anon_vma_name *anon_vma_name_alloc(const char *name) /* Add 1 for NUL terminator at the end of the anon_name->name */ count = strlen(name) + 1; - anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL); + anon_name = kmalloc_flex(*anon_name, name, count, GFP_KERNEL); if (anon_name) { kref_init(&anon_name->kref); memcpy(anon_name->name, name, count); diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index c6078cd7f7e5..597af8a80163 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -783,7 +783,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, size = thresholds->primary ? thresholds->primary->size + 1 : 1; /* Allocate memory for new array of thresholds */ - new = kmalloc(struct_size(new, entries, size), GFP_KERNEL_ACCOUNT); + new = kmalloc_flex(*new, entries, size, GFP_KERNEL_ACCOUNT); if (!new) { ret = -ENOMEM; goto unlock; @@ -946,7 +946,7 @@ static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, { struct mem_cgroup_eventfd_list *event; - event = kmalloc(sizeof(*event), GFP_KERNEL_ACCOUNT); + event = kmalloc_obj(*event, GFP_KERNEL_ACCOUNT); if (!event) return -ENOMEM; @@ -1109,7 +1109,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, CLASS(fd, cfile)(cfd); - event = kzalloc(sizeof(*event), GFP_KERNEL_ACCOUNT); + event = kzalloc_obj(*event, GFP_KERNEL_ACCOUNT); if (!event) return -ENOMEM; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f2b87e02574e..63773a0b91f7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -192,7 +192,7 @@ static struct obj_cgroup *obj_cgroup_alloc(void) struct obj_cgroup *objcg; int ret; - objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); + objcg = kzalloc_obj(struct obj_cgroup, GFP_KERNEL); if (!objcg) return NULL; @@ -3761,8 +3761,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent) goto fail; error = -ENOMEM; - memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), - GFP_KERNEL_ACCOUNT); + memcg->vmstats = kzalloc_obj(struct memcg_vmstats, GFP_KERNEL_ACCOUNT); if (!memcg->vmstats) goto fail; diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c index a34fccc23b6a..c69774c19c88 100644 --- a/mm/memfd_luo.c +++ b/mm/memfd_luo.c @@ -112,7 +112,7 @@ static int memfd_luo_preserve_folios(struct file *file, * up being smaller if there are higher order folios. 
*/ max_folios = PAGE_ALIGN(size) / PAGE_SIZE; - folios = kvmalloc_array(max_folios, sizeof(*folios), GFP_KERNEL); + folios = kvmalloc_objs(*folios, max_folios, GFP_KERNEL); if (!folios) return -ENOMEM; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ba4231858a36..ee42d4361309 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -387,7 +387,7 @@ static void __add_to_kill(struct task_struct *tsk, const struct page *p, { struct to_kill *tk; - tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); + tk = kmalloc_obj(struct to_kill, GFP_ATOMIC); if (!tk) { pr_err("Out of memory while machine check handling\n"); return; @@ -1917,7 +1917,7 @@ static int hugetlb_update_hwpoison(struct folio *folio, struct page *page) return MF_HUGETLB_PAGE_PRE_POISONED; } - raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); + raw_hwp = kmalloc_obj(struct raw_hwp_page, GFP_ATOMIC); if (raw_hwp) { raw_hwp->page = page; llist_add(&raw_hwp->node, head); @@ -2214,7 +2214,7 @@ static void add_to_kill_pgoff(struct task_struct *tsk, { struct to_kill *tk; - tk = kmalloc(sizeof(*tk), GFP_ATOMIC); + tk = kmalloc_obj(*tk, GFP_ATOMIC); if (!tk) { pr_info("Unable to kill proc %d\n", tsk->pid); return; diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c index 545e34626df7..b05c916fa5f4 100644 --- a/mm/memory-tiers.c +++ b/mm/memory-tiers.c @@ -227,7 +227,7 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty } } - new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL); + new_memtier = kzalloc_obj(struct memory_tier, GFP_KERNEL); if (!new_memtier) return ERR_PTR(-ENOMEM); @@ -625,7 +625,7 @@ struct memory_dev_type *alloc_memory_type(int adistance) { struct memory_dev_type *memtype; - memtype = kmalloc(sizeof(*memtype), GFP_KERNEL); + memtype = kmalloc_obj(*memtype, GFP_KERNEL); if (!memtype) return ERR_PTR(-ENOMEM); @@ -912,8 +912,8 @@ static int __init memory_tier_init(void) panic("%s() failed to register memory tier subsystem\n", __func__); #ifdef CONFIG_MIGRATION - node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes), - GFP_KERNEL); + node_demotion = kzalloc_objs(struct demotion_nodes, nr_node_ids, + GFP_KERNEL); WARN_ON(!node_demotion); #endif diff --git a/mm/memory.c b/mm/memory.c index 876bf73959c6..144e30d2825f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3035,7 +3035,7 @@ static inline struct pfnmap_track_ctx *pfnmap_track_ctx_alloc(unsigned long pfn, if (pfnmap_track(pfn, size, prot)) return ERR_PTR(-EINVAL); - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kmalloc_obj(*ctx, GFP_KERNEL); if (unlikely(!ctx)) { pfnmap_untrack(pfn, size); return ERR_PTR(-ENOMEM); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index dbd48502ac24..0835743f6575 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -229,8 +229,8 @@ int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords) if (!new_bw) return -ENOMEM; - new_wi_state = kmalloc(struct_size(new_wi_state, iw_table, nr_node_ids), - GFP_KERNEL); + new_wi_state = kmalloc_flex(*new_wi_state, iw_table, nr_node_ids, + GFP_KERNEL); if (!new_wi_state) { kfree(new_bw); return -ENOMEM; @@ -3642,8 +3642,8 @@ static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr, kstrtou8(buf, 0, &weight) || weight == 0) return -EINVAL; - new_wi_state = kzalloc(struct_size(new_wi_state, iw_table, nr_node_ids), - GFP_KERNEL); + new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids, + GFP_KERNEL); if (!new_wi_state) return -ENOMEM; @@ -3695,8 +3695,8 @@ static ssize_t 
weighted_interleave_auto_store(struct kobject *kobj, if (kstrtobool(buf, &input)) return -EINVAL; - new_wi_state = kzalloc(struct_size(new_wi_state, iw_table, nr_node_ids), - GFP_KERNEL); + new_wi_state = kzalloc_flex(*new_wi_state, iw_table, nr_node_ids, + GFP_KERNEL); if (!new_wi_state) return -ENOMEM; for (i = 0; i < nr_node_ids; i++) @@ -3815,7 +3815,7 @@ static int sysfs_wi_node_add(int nid) return -EINVAL; } - new_attr = kzalloc(sizeof(*new_attr), GFP_KERNEL); + new_attr = kzalloc_obj(*new_attr, GFP_KERNEL); if (!new_attr) return -ENOMEM; @@ -3880,8 +3880,7 @@ static int __init add_weighted_interleave_group(struct kobject *mempolicy_kobj) { int nid, err; - wi_group = kzalloc(struct_size(wi_group, nattrs, nr_node_ids), - GFP_KERNEL); + wi_group = kzalloc_flex(*wi_group, nattrs, nr_node_ids, GFP_KERNEL); if (!wi_group) return -ENOMEM; mutex_init(&wi_group->kobj_lock); diff --git a/mm/mempool.c b/mm/mempool.c index c290e5261b47..c22c63ccbbcd 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -371,8 +371,7 @@ int mempool_resize(struct mempool *pool, int new_min_nr) spin_unlock_irqrestore(&pool->lock, flags); /* Grow the pool */ - new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements), - GFP_KERNEL); + new_elements = kmalloc_objs(*new_elements, new_min_nr, GFP_KERNEL); if (!new_elements) return -ENOMEM; diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 8e0125dc0522..1aa561a055eb 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -618,8 +618,8 @@ int __mmu_notifier_register(struct mmu_notifier *subscription, * know that mm->notifier_subscriptions can't change while we * hold the write side of the mmap_lock. */ - subscriptions = kzalloc( - sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL); + subscriptions = kzalloc_obj(struct mmu_notifier_subscriptions, + GFP_KERNEL); if (!subscriptions) return -ENOMEM; diff --git a/mm/page_owner.c b/mm/page_owner.c index b6a394a130ec..8178e0be557f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -181,7 +181,7 @@ static void add_stack_record_to_list(struct stack_record *stack_record, return; set_current_in_page_owner(); - stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask)); + stack = kmalloc_obj(*stack, gfp_nested_mask(gfp_mask)); if (!stack) { unset_current_in_page_owner(); return; diff --git a/mm/page_reporting.c b/mm/page_reporting.c index 8a03effda749..7323284d2f7d 100644 --- a/mm/page_reporting.c +++ b/mm/page_reporting.c @@ -322,7 +322,7 @@ static void page_reporting_process(struct work_struct *work) atomic_set(&prdev->state, state); /* allocate scatterlist to store pages being reported on */ - sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL); + sgl = kmalloc_objs(*sgl, PAGE_REPORTING_CAPACITY, GFP_KERNEL); if (!sgl) goto err_out; diff --git a/mm/shmem.c b/mm/shmem.c index d129f4eb5ca9..5f2e8e3d5b75 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5328,7 +5328,7 @@ int shmem_init_fs_context(struct fs_context *fc) { struct shmem_options *ctx; - ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); + ctx = kzalloc_obj(struct shmem_options, GFP_KERNEL); if (!ctx) return -ENOMEM; diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c index d1e32ac01407..d0b92d6da50f 100644 --- a/mm/shmem_quota.c +++ b/mm/shmem_quota.c @@ -67,7 +67,7 @@ static int shmem_read_file_info(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); struct mem_dqinfo *info = &dqopt->info[type]; - info->dqi_priv = kzalloc(sizeof(struct rb_root), GFP_NOFS); + info->dqi_priv = kzalloc_obj(struct rb_root, GFP_NOFS); 
if (!info->dqi_priv) return -ENOMEM; @@ -190,7 +190,7 @@ static int shmem_acquire_dquot(struct dquot *dquot) } /* We don't have entry for this id yet, create it */ - new_entry = kzalloc(sizeof(struct quota_id), GFP_NOFS); + new_entry = kzalloc_obj(struct quota_id, GFP_NOFS); if (!new_entry) { ret = -ENOMEM; goto out_unlock; diff --git a/mm/shrinker.c b/mm/shrinker.c index 4a93fd433689..52e7bebe4579 100644 --- a/mm/shrinker.c +++ b/mm/shrinker.c @@ -682,7 +682,7 @@ struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...) va_list ap; int err; - shrinker = kzalloc(sizeof(struct shrinker), GFP_KERNEL); + shrinker = kzalloc_obj(struct shrinker, GFP_KERNEL); if (!shrinker) return NULL; diff --git a/mm/slub.c b/mm/slub.c index 865bc050f654..b8e9c0b62435 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2636,7 +2636,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init, if (still_accessible) { struct rcu_delayed_free *delayed_free; - delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT); + delayed_free = kmalloc_obj(*delayed_free, GFP_NOWAIT); if (delayed_free) { /* * Let KASAN track our call stack as a "related work @@ -4928,7 +4928,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size) if (unlikely(size > s->sheaf_capacity)) { - sheaf = kzalloc(struct_size(sheaf, objects, size), gfp); + sheaf = kzalloc_flex(*sheaf, objects, size, gfp); if (!sheaf) return NULL; @@ -9139,7 +9139,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) unsigned long sum = 0; int cpu; int len = 0; - int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); + int *data = kmalloc_objs(int, nr_cpu_ids, GFP_KERNEL); if (!data) return -ENOMEM; @@ -9510,7 +9510,7 @@ int sysfs_slab_alias(struct kmem_cache *s, const char *name) return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); } - al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); + al = kmalloc_obj(struct saved_alias, GFP_KERNEL); if (!al) return -ENOMEM; diff --git a/mm/swapfile.c b/mm/swapfile.c index c2377c4b6bb9..8a1e2af356ba 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2575,7 +2575,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, } /* No merge, insert a new extent. 
*/ - new_se = kmalloc(sizeof(*se), GFP_KERNEL); + new_se = kmalloc_obj(*se, GFP_KERNEL); if (new_se == NULL) return -ENOMEM; new_se->start_page = start_page; @@ -3048,7 +3048,7 @@ static struct swap_info_struct *alloc_swap_info(void) struct swap_info_struct *defer = NULL; unsigned int type; - p = kvzalloc(sizeof(struct swap_info_struct), GFP_KERNEL); + p = kvzalloc_obj(struct swap_info_struct, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); @@ -3257,7 +3257,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, int err = -ENOMEM; unsigned long i; - cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); + cluster_info = kvzalloc_objs(*cluster_info, nr_clusters, GFP_KERNEL); if (!cluster_info) goto err; @@ -3265,8 +3265,8 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, spin_lock_init(&cluster_info[i].lock); if (!(si->flags & SWP_SOLIDSTATE)) { - si->global_cluster = kmalloc(sizeof(*si->global_cluster), - GFP_KERNEL); + si->global_cluster = kmalloc_obj(*si->global_cluster, + GFP_KERNEL); if (!si->global_cluster) goto err; for (i = 0; i < SWAP_NR_ORDERS; i++) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 03e1117480d5..672c56d8bfe1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4920,14 +4920,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, return NULL; } - vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); - vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); + vms = kzalloc_objs(vms[0], nr_vms, GFP_KERNEL); + vas = kzalloc_objs(vas[0], nr_vms, GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); - vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); + vms[area] = kzalloc_obj(struct vm_struct, GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } @@ -5366,7 +5366,7 @@ static void vmap_init_nodes(void) int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); if (n > 1) { - vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT); + vn = kmalloc_objs(*vn, n, GFP_NOWAIT); if (vn) { /* Node partition is 16 pages. 
*/ vmap_zone_size = (1 << 4) * PAGE_SIZE; diff --git a/mm/vmpressure.c b/mm/vmpressure.c index c197ed47bcc4..035e0384e39b 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -402,7 +402,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg, mode = ret; } - ev = kzalloc(sizeof(*ev), GFP_KERNEL); + ev = kzalloc_obj(*ev, GFP_KERNEL); if (!ev) { ret = -ENOMEM; goto out; diff --git a/mm/vmscan.c b/mm/vmscan.c index 44e4fcd6463c..0fc9373e8251 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3821,7 +3821,8 @@ static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force } else if (!walk && force_alloc) { VM_WARN_ON_ONCE(current_is_kswapd()); - walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); + walk = kzalloc_obj(*walk, + __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); } current->reclaim_state->mm_walk = walk; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index d5d1c27b3852..fa0b726dcec7 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2062,7 +2062,7 @@ struct zs_pool *zs_create_pool(const char *name) struct zs_pool *pool; struct size_class *prev_class = NULL; - pool = kzalloc(sizeof(*pool), GFP_KERNEL); + pool = kzalloc_obj(*pool, GFP_KERNEL); if (!pool) return NULL; @@ -2128,7 +2128,7 @@ struct zs_pool *zs_create_pool(const char *name) } } - class = kzalloc(sizeof(struct size_class), GFP_KERNEL); + class = kzalloc_obj(struct size_class, GFP_KERNEL); if (!class) goto err; diff --git a/mm/zswap.c b/mm/zswap.c index af3f0fbb0558..bbfd8a51e4c8 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -251,7 +251,7 @@ static struct zswap_pool *zswap_pool_create(char *compressor) if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET)) return NULL; - pool = kzalloc(sizeof(*pool), GFP_KERNEL); + pool = kzalloc_obj(*pool, GFP_KERNEL); if (!pool) return NULL; @@ -1665,7 +1665,7 @@ int zswap_swapon(int type, unsigned long nr_pages) unsigned int nr, i; nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES); - trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL); + trees = kvzalloc_objs(*tree, nr, GFP_KERNEL); if (!trees) { pr_err("alloc failed, zswap disabled for swap type %d\n", type); return -ENOMEM; |
