From e9d198f2be851f8c977b9ac47748edc4f9d0f26e Mon Sep 17 00:00:00 2001
From: Thorsten Scherer
Date: Sun, 12 Mar 2023 13:23:15 +0100
Subject: slab: Adjust comment after refactoring of gfp.h

Reflect the change from the commit below.

Fixes: cb5a065b4ea9 ("headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>")
Signed-off-by: Thorsten Scherer
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 include/linux/slab.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..87d687c43d8c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -526,7 +526,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * to be at least to the size.
  *
  * The @flags argument may be one of the GFP flags defined at
- * include/linux/gfp.h and described at
+ * include/linux/gfp_types.h and described at
  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
  *
  * The recommended usage of the @flags is described at
--
cgit v1.2.3
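[Editorial note, not part of the patch series: the @flags values referenced by the
updated comment above are defined in include/linux/gfp_types.h and documented in
Documentation/core-api/mm-api.rst. A minimal, illustrative sketch of the two most
common cases follows; struct foo and the helper names are invented for this example
and do not appear in the patches.]

#include <linux/slab.h>		/* kmalloc(), kzalloc(), kfree(); pulls in the GFP flags */

struct foo {
	int id;
	char name[16];
};

/* Process context: the allocation may sleep while memory is reclaimed. */
static struct foo *foo_create(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

/* Atomic context (IRQ handler, under a spinlock): must not sleep. */
static struct foo *foo_create_atomic(void)
{
	return kzalloc(sizeof(struct foo), GFP_ATOMIC);
}

static void foo_destroy(struct foo *f)
{
	kfree(f);
}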
From c4ba69f00c18524eb89990e9afda9d2a9b54de2f Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 28 Feb 2023 10:59:54 +0100
Subject: mm, page_flags: remove PG_slob_free

With SLOB removed we no longer need the PG_slob_free alias for
PG_private. Also update tools/mm/page-types.

Signed-off-by: Vlastimil Babka
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Lorenzo Stoakes
Acked-by: Mike Rapoport (IBM)
---
 include/linux/page-flags.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a7e3a3405520..2bdc41cb0594 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -174,9 +174,6 @@ enum pageflags {
 	/* Remapped by swiotlb-xen. */
 	PG_xen_remapped = PG_owner_priv_1,
 
-	/* SLOB */
-	PG_slob_free = PG_private,
-
 #ifdef CONFIG_MEMORY_FAILURE
 	/*
 	 * Compound pages. Stored in first tail page's flags.
@@ -483,7 +480,6 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 PAGEFLAG(Workingset, workingset, PF_HEAD)
 	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
-__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */
 
 /* Xen */
--
cgit v1.2.3

From de4d6089b9271ed172e92148a04f31578b375525 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 28 Feb 2023 15:34:17 +0100
Subject: mm/slab: remove CONFIG_SLOB code from slab common code

CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdef's
specific to SLOB in the slab headers and common code.

Signed-off-by: Vlastimil Babka
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Lorenzo Stoakes
Acked-by: Mike Rapoport (IBM)
---
 include/linux/slab.h | 39 ---------------------------------------
 1 file changed, 39 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..7f645a4c1298 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
 #endif
 #endif
 
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	3
-#endif
-#endif
-
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
 	NR_KMALLOC_TYPES
 };
 
-#ifndef CONFIG_SLOB
 extern struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 
@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 }
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 
@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
 
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large(size, flags);
-
-	return __kmalloc(size, flags);
-}
-#endif
 
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 	}
 	return __kmalloc_node(size, flags, node);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_node(size, flags, node);
-
-	return __kmalloc_node(size, flags, node);
-}
-#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
--
cgit v1.2.3
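[Editorial note, not part of the patch series: the comment removed from kfree_bulk()
in the patch above documented a SLOB-only restriction. With SLAB and SLUB the bulk
free path derives each object's cache from the object itself, which is why the
restriction could be dropped. A small, illustrative usage sketch follows; the function
name and object count are invented for this example.]

#include <linux/errno.h>
#include <linux/slab.h>

#define NOBJS 8

static int bulk_alloc_free_demo(void)
{
	void *objs[NOBJS];
	int i;

	for (i = 0; i < NOBJS; i++) {
		objs[i] = kmalloc(64, GFP_KERNEL);
		if (!objs[i])
			goto err;
	}

	/* Free all NOBJS objects in one call via kmem_cache_free_bulk(NULL, ...). */
	kfree_bulk(NOBJS, objs);
	return 0;

err:
	while (i--)
		kfree(objs[i]);
	return -ENOMEM;
}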
McKenney" Cc: Frederic Weisbecker Cc: Neeraj Upadhyay Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes --- include/linux/rcupdate.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 094321c17e48..dcd2cf1e8326 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -976,8 +976,10 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * - * Note that the allowable offset might decrease in the future, for example, - * to allow something like kmem_cache_free_rcu(). + * The object to be freed can be allocated either by kmalloc() or + * kmem_cache_alloc(). + * + * Note that the allowable offset might decrease in the future. * * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. -- cgit v1.2.3