| author | Harry Yoo <harry.yoo@oracle.com> | 2026-01-27 19:31:51 +0900 |
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2026-02-04 10:05:36 +0100 |
| commit | 2f35fee943435b5b1a3e403c7fb9bd19727754d8 | |
| tree | 57378d3d113d25aadcca59ead7fbb0f6ca209e3d | |
| parent | a77d6d338685025cbf84f6e3abd92a8e59a4d894 | |
mm/slab: only allow SLAB_OBJ_EXT_IN_OBJ for unmergeable caches
While SLAB_OBJ_EXT_IN_OBJ reduces the memory overhead of accounting
slab objects, it prevents slab merging because merging can change the
metadata layout.

As pointed out by Vlastimil Babka, disabling merging solely for this
memory optimization may not be a net win, because disabling slab
merging tends to increase overall memory usage.

Restrict SLAB_OBJ_EXT_IN_OBJ to caches that are already unmergeable for
other reasons (e.g., those with constructors or SLAB_TYPESAFE_BY_RCU).
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260127103151.21883-3-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
| -rw-r--r-- | mm/slab.h | 1 |
| -rw-r--r-- | mm/slab_common.c | 3 |
| -rw-r--r-- | mm/slub.c | 3 |

3 files changed, 4 insertions, 3 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 43b7c5ababb5..3f49666e943c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -411,6 +411,7 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 			unsigned int useroffset, unsigned int usersize);
 
 int slab_unmergeable(struct kmem_cache *s);
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);
 
 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 886d02fa94fb..094afa2792d0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -174,8 +174,7 @@ int slab_unmergeable(struct kmem_cache *s)
 	return 0;
 }
 
-static bool slab_args_unmergeable(struct kmem_cache_args *args,
-				  slab_flags_t flags)
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags)
 {
 	if (slab_nomerge)
 		return true;
diff --git a/mm/slub.c b/mm/slub.c
index 0805c09d4b55..18ac9460f9e9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -8382,7 +8382,8 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 	 */
 	aligned_size = ALIGN(size, s->align);
 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
-	if (aligned_size - size >= sizeof(struct slabobj_ext))
+	if (slab_args_unmergeable(args, s->flags) &&
+	    (aligned_size - size >= sizeof(struct slabobj_ext)))
 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
 #endif
 	size = aligned_size;
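
For context, after this change the in-object obj_ext optimization is only considered for caches that are already unmergeable for an independent reason, such as having a constructor or SLAB_TYPESAFE_BY_RCU. Below is a minimal illustrative sketch (not part of the commit) of a cache that would still qualify; the names `my_obj`, `my_obj_ctor()` and `my_obj_cache_init()` are hypothetical.

```c
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical object type, for illustration only. */
struct my_obj {
	int state;
};

/* A constructor already makes the cache unmergeable. */
static void my_obj_ctor(void *p)
{
	struct my_obj *obj = p;

	obj->state = 0;
}

static struct kmem_cache *my_obj_cache;

static int __init my_obj_cache_init(void)
{
	/*
	 * Because this cache is unmergeable anyway (it has a ctor),
	 * calculate_sizes() may still set SLAB_OBJ_EXT_IN_OBJ when the
	 * alignment padding can hold a struct slabobj_ext.  A mergeable
	 * cache created without a ctor is no longer considered.
	 */
	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 0, SLAB_ACCOUNT, my_obj_ctor);
	return my_obj_cache ? 0 : -ENOMEM;
}
```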
