author     Linus Torvalds <torvalds@linux-foundation.org>  2025-12-11 08:54:08 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-12-11 08:54:08 +0900
commit     1de741159bbb187c8018c4c779acde4ea0188478 (patch)
tree       b8f2cd5230df77517c8204dbc914f560c3af0f27 /include/linux/slab.h
parent     0723a166d1f1da4c60d7b11289383f073e4dee9b (diff)
parent     0f35040de59371ad542b915d7b91176c9910dadc (diff)
Merge tag 'slab-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fix from Vlastimil Babka:

 - A stable fix for a performance regression in tests that perform
   kmem_cache_destroy() a lot, due to the unnecessarily wide scope of
   kvfree_rcu_barrier() (Harry Yoo)

* tag 'slab-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: introduce kvfree_rcu_barrier_on_cache() for cache destruction
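For illustration, a minimal sketch of the intended pattern, assuming a hypothetical caller named example_destroy_path(); the actual cache-destruction change lives in mm/slab_common.c and is not part of this header diff:

#include <linux/slab.h>

/*
 * Hedged sketch, not the in-tree implementation: when tearing down a
 * single cache, wait only for kvfree_rcu()/kfree_rcu() objects queued
 * against that cache instead of flushing every pending batch in the
 * system with the global kvfree_rcu_barrier().
 */
static void example_destroy_path(struct kmem_cache *s)
{
	kvfree_rcu_barrier_on_cache(s);	/* per-cache drain via the new API */

	/* ... the remaining teardown of @s would follow here ... */
}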
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  |  7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index cf443f064a66..2482992248dc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1150,10 +1150,17 @@ static inline void kvfree_rcu_barrier(void)
 	rcu_barrier();
 }
 
+static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+	rcu_barrier();
+}
+
 static inline void kfree_rcu_scheduler_running(void) { }
 #else
 void kvfree_rcu_barrier(void);
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
+
 void kfree_rcu_scheduler_running(void);
 #endif
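The regression described in the pull message comes from workloads that churn slab caches. The loop below is a made-up illustration (example_churn_caches() and the cache name are invented, not an in-tree test): per the pull message, each kmem_cache_destroy() previously paid for a barrier whose scope covered all pending kvfree_rcu() work, whereas a per-cache barrier only waits for objects belonging to the cache being destroyed.

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical stress pattern that repeatedly creates and destroys a cache. */
static int example_churn_caches(int iterations)
{
	struct kmem_cache *s;
	int i;

	for (i = 0; i < iterations; i++) {
		s = kmem_cache_create("example_churn", 64, 0, 0, NULL);
		if (!s)
			return -ENOMEM;
		/* The destroy path can now drain only this cache's objects. */
		kmem_cache_destroy(s);
	}
	return 0;
}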