author    Harry Yoo <harry.yoo@oracle.com>    2026-01-13 15:18:44 +0900
committer Vlastimil Babka <vbabka@suse.cz>    2026-02-04 10:05:35 +0100
commit    fab0694646d75d5b03e9898ffb85899fb23320ea (patch)
tree      883ee2c80d2db4af8767c02731454fd13798868a
parent    70089d018807506e8a6acd03eede33a0619ec417 (diff)
mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
To access SLUB's internal implementation details beyond cache flags in
ksize(), move __ksize(), ksize(), and slab_ksize() to mm/slub.c.

[vbabka@suse.cz: also make __ksize() static and move its kerneldoc to ksize()]

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-9-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
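As context for the move: the ksize() kerneldoc carried along in this patch
says callers should size buffers with kmalloc_size_roundup() up front rather
than probing with ksize() after the fact. A minimal sketch of that supported
pattern follows; ksize_example() and the request size of 100 bytes are
illustrative assumptions, not part of this patch.

	#include <linux/slab.h>
	#include <linux/bug.h>

	static void ksize_example(void)
	{
		size_t want = 100;
		/*
		 * Supported pattern: round the size up before allocating,
		 * so the whole buffer is usable without tripping KASAN,
		 * UBSAN_BOUNDS, or FORTIFY_SOURCE.
		 */
		size_t usable = kmalloc_size_roundup(want);
		char *buf = kmalloc(usable, GFP_KERNEL);

		if (!buf)
			return;
		/* ksize() reports the full backing size, at least 'usable'. */
		WARN_ON(ksize(buf) < usable);
		kfree(buf);
	}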
-rw-r--r--  include/linux/slab.h  1
-rw-r--r--  mm/slab.h             27
-rw-r--r--  mm/slab_common.c      61
-rw-r--r--  mm/slub.c             86
4 files changed, 86 insertions, 89 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4554c04a9bd7..93e367b6a5f6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -509,7 +509,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
void kfree(const void *objp);
void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
diff --git a/mm/slab.h b/mm/slab.h
index d5da2f69e2d5..43b7c5ababb5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -661,33 +661,6 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void kvfree_rcu_cb(struct rcu_head *head);
-size_t __ksize(const void *objp);
-
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->object_size;
-#endif
- if (s->flags & SLAB_KASAN)
- return s->object_size;
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
-}
-
static inline unsigned int large_kmalloc_order(const struct page *page)
{
return page[1].flags.f & 0xff;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 027bf64c2e35..b2db8f8f3cf0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1021,43 +1021,6 @@ void __init create_kmalloc_caches(void)
0, SLAB_NO_MERGE, NULL);
}
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
-{
- const struct page *page;
- const struct slab *slab;
-
- if (unlikely(object == ZERO_SIZE_PTR))
- return 0;
-
- page = virt_to_page(object);
-
- if (unlikely(PageLargeKmalloc(page)))
- return large_kmalloc_size(page);
-
- slab = page_slab(page);
- /* Delete this after we're sure there are no users */
- if (WARN_ON(!slab))
- return page_size(page);
-
-#ifdef CONFIG_SLUB_DEBUG
- skip_orig_size_check(slab->slab_cache, object);
-#endif
-
- return slab_ksize(slab->slab_cache);
-}
-
gfp_t kmalloc_fix_flags(gfp_t flags)
{
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1273,30 +1236,6 @@ void kfree_sensitive(const void *p)
}
EXPORT_SYMBOL(kfree_sensitive);
-size_t ksize(const void *objp)
-{
- /*
- * We need to first check that the pointer to the object is valid.
- * The KASAN report printed from ksize() is more useful than when
- * it's printed later when the behaviour could be undefined due to
- * a potential use-after-free or double-free.
- *
- * We use kasan_check_byte(), which is supported for the hardware
- * tag-based KASAN mode, unlike kasan_check_read/write().
- *
- * If the pointed-to memory is invalid, we return 0 to avoid users of
- * ksize() writing to and potentially corrupting the memory region.
- *
- * We want to perform the check before __ksize(), to avoid potentially
- * crashing in __ksize() due to accessing invalid metadata.
- */
- if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
- return 0;
-
- return kfence_ksize(objp) ?: __ksize(objp);
-}
-EXPORT_SYMBOL(ksize);
-
#ifdef CONFIG_BPF_SYSCALL
#include <linux/btf.h>
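The ZERO_OR_NULL_PTR() check removed here (and re-added verbatim in mm/slub.c
below) covers zero-length allocations. A hedged illustration, assuming
mainline kmalloc() semantics; zero_size_example() is a hypothetical name:

	#include <linux/slab.h>
	#include <linux/bug.h>

	static void zero_size_example(void)
	{
		/* A zero-byte kmalloc() returns ZERO_SIZE_PTR, not NULL. */
		void *p = kmalloc(0, GFP_KERNEL);

		WARN_ON(p != ZERO_SIZE_PTR);
		WARN_ON(ksize(p) != 0);	/* short-circuits via ZERO_OR_NULL_PTR() */
		kfree(p);		/* kfree() accepts ZERO_SIZE_PTR */
	}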
diff --git a/mm/slub.c b/mm/slub.c
index 7b6d8df06ad9..782685433580 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -7028,6 +7028,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->object_size;
+#endif
+ if (s->flags & SLAB_KASAN)
+ return s->object_size;
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+
+static size_t __ksize(const void *object)
+{
+ const struct page *page;
+ const struct slab *slab;
+
+ if (unlikely(object == ZERO_SIZE_PTR))
+ return 0;
+
+ page = virt_to_page(object);
+
+ if (unlikely(PageLargeKmalloc(page)))
+ return large_kmalloc_size(page);
+
+ slab = page_slab(page);
+ /* Delete this after we're sure there are no users */
+ if (WARN_ON(!slab))
+ return page_size(page);
+
+#ifdef CONFIG_SLUB_DEBUG
+ skip_orig_size_check(slab->slab_cache, object);
+#endif
+
+ return slab_ksize(slab->slab_cache);
+}
+
+/**
+ * ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+ /*
+ * We need to first check that the pointer to the object is valid.
+ * The KASAN report printed from ksize() is more useful than when
+ * it's printed later when the behaviour could be undefined due to
+ * a potential use-after-free or double-free.
+ *
+ * We use kasan_check_byte(), which is supported for the hardware
+ * tag-based KASAN mode, unlike kasan_check_read/write().
+ *
+ * If the pointed-to memory is invalid, we return 0 to avoid users of
+ * ksize() writing to and potentially corrupting the memory region.
+ *
+ * We want to perform the check before __ksize(), to avoid potentially
+ * crashing in __ksize() due to accessing invalid metadata.
+ */
+ if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
+ return 0;
+
+ return kfence_ksize(objp) ?: __ksize(objp);
+}
+EXPORT_SYMBOL(ksize);
+
static void free_large_kmalloc(struct page *page, void *object)
{
unsigned int order = compound_order(page);
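A closing note on the slab_ksize() flag precedence moved above: when a cache
stores metadata past the object (SLAB_STORE_USER or SLAB_TYPESAFE_BY_RCU) and
no debug red-zoning or KASAN applies, ksize() reports s->inuse rather than the
full s->size. A hedged sketch of observing this; the cache name, object size,
and slab_ksize_example() are illustrative, not part of this patch:

	#include <linux/slab.h>
	#include <linux/printk.h>

	static void slab_ksize_example(void)
	{
		/*
		 * SLAB_STORE_USER places alloc/free tracking after the
		 * object, so slab_ksize() returns s->inuse, not s->size.
		 */
		struct kmem_cache *c = kmem_cache_create("ksize-demo", 100, 0,
							 SLAB_STORE_USER, NULL);
		void *obj;

		if (!c)
			return;
		obj = kmem_cache_alloc(c, GFP_KERNEL);
		if (obj) {
			pr_info("usable bytes: %zu\n", ksize(obj));
			kmem_cache_free(c, obj);
		}
		kmem_cache_destroy(c);
	}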