 mm/slab_common.c |  7 +++++--
 mm/slub.c        | 41 +++++++++++++++++++++++++++++++++++++++------
 2 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 005a4319c06a..b6601e0fe598 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1623,8 +1623,11 @@ static bool kfree_rcu_sheaf(void *obj)
 
 	slab = folio_slab(folio);
 	s = slab->slab_cache;
-	if (s->cpu_sheaves)
-		return __kfree_rcu_sheaf(s, obj);
+	if (s->cpu_sheaves) {
+		if (likely(!IS_ENABLED(CONFIG_NUMA) ||
+			   slab_nid(slab) == numa_mem_id()))
+			return __kfree_rcu_sheaf(s, obj);
+	}
 
 	return false;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 596f375870af..6bcddc513963 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -472,6 +472,7 @@ struct slab_sheaf {
 	};
 	struct kmem_cache *cache;
 	unsigned int size;
+	int node; /* only used for rcu_sheaf */
 	void *objects[];
 };
 
@@ -5822,7 +5823,7 @@ static void rcu_free_sheaf(struct rcu_head *head)
 	 */
 	__rcu_free_sheaf_prepare(s, sheaf);
 
-	barn = get_node(s, numa_mem_id())->barn;
+	barn = get_node(s, sheaf->node)->barn;
 
 	/* due to slab_free_hook() */
 	if (unlikely(sheaf->size == 0))
@@ -5912,10 +5913,12 @@ do_free:
 	 */
 	rcu_sheaf->objects[rcu_sheaf->size++] = obj;
 
-	if (likely(rcu_sheaf->size < s->sheaf_capacity))
+	if (likely(rcu_sheaf->size < s->sheaf_capacity)) {
 		rcu_sheaf = NULL;
-	else
+	} else {
 		pcs->rcu_free = NULL;
+		rcu_sheaf->node = numa_mem_id();
+	}
 
 	/*
 	 * we flush before local_unlock to make sure a racing
@@ -5946,7 +5949,11 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 	bool init = slab_want_init_on_free(s);
 	unsigned int batch, i = 0;
 	struct node_barn *barn;
+	void *remote_objects[PCS_BATCH_MAX];
+	unsigned int remote_nr = 0;
+	int node = numa_mem_id();
 
+next_remote_batch:
 	while (i < size) {
 		struct slab *slab = virt_to_slab(p[i]);
 
@@ -5956,7 +5963,15 @@ static void free_to_pcs_bulk(struct kmem_cache *s, size_t size, void **p)
 		if (unlikely(!slab_free_hook(s, p[i], init, false))) {
 			p[i] = p[--size];
 			if (!size)
-				return;
+				goto flush_remote;
+			continue;
+		}
+
+		if (unlikely(IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)) {
+			remote_objects[remote_nr] = p[i];
+			p[i] = p[--size];
+			if (++remote_nr >= PCS_BATCH_MAX)
+				goto flush_remote;
 			continue;
 		}
 
@@ -6026,6 +6041,15 @@ no_empty:
 	 */
 fallback:
 	__kmem_cache_free_bulk(s, size, p);
+
+flush_remote:
+	if (remote_nr) {
+		__kmem_cache_free_bulk(s, remote_nr, &remote_objects[0]);
+		if (i < size) {
+			remote_nr = 0;
+			goto next_remote_batch;
+		}
+	}
 }
 
 #ifndef CONFIG_SLUB_TINY
@@ -6117,8 +6141,13 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	if (unlikely(!slab_free_hook(s, object, slab_want_init_on_free(s), false)))
 		return;
 
-	if (!s->cpu_sheaves || !free_to_pcs(s, object))
-		do_slab_free(s, slab, object, object, 1, addr);
+	if (s->cpu_sheaves && likely(!IS_ENABLED(CONFIG_NUMA) ||
+				     slab_nid(slab) == numa_mem_id())) {
+		if (likely(free_to_pcs(s, object)))
+			return;
+	}
+
+	do_slab_free(s, slab, object, object, 1, addr);
 }
 
 #ifdef CONFIG_MEMCG
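The subtle part of this change is the partitioning loop added to free_to_pcs_bulk(): objects on the local NUMA node stay in p[] (compacted in place by swapping in the last element, which is then re-examined), while remote objects are diverted into a small fixed-size side buffer that is flushed whenever it fills, after which the scan resumes. Below is a minimal user-space sketch of that scheme, not kernel code: is_local(), flush(), and BATCH_MAX are hypothetical stand-ins for the slab_nid(slab) == numa_mem_id() test, __kmem_cache_free_bulk(), and PCS_BATCH_MAX.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define BATCH_MAX 4			/* models PCS_BATCH_MAX */

/* Stand-in for the slab_nid(slab) == numa_mem_id() locality test. */
static bool is_local(void *obj)
{
	return ((uintptr_t)obj & 1) == 0;	/* fake rule: even "addresses" are local */
}

/* Stand-in for __kmem_cache_free_bulk() / the per-CPU sheaf path. */
static void flush(const char *what, void **objs, size_t nr)
{
	(void)objs;
	printf("%s: flushing %zu object(s)\n", what, nr);
}

static void bulk_free(void **p, size_t size)
{
	void *remote_objects[BATCH_MAX];
	size_t remote_nr = 0;
	size_t i = 0;

next_remote_batch:
	while (i < size) {
		if (!is_local(p[i])) {
			/*
			 * Remote object: divert it to the side buffer and
			 * compact p[] by swapping in the last element, which
			 * the next iteration re-examines at the same index.
			 */
			remote_objects[remote_nr] = p[i];
			p[i] = p[--size];
			if (++remote_nr >= BATCH_MAX)
				goto flush_remote;
			continue;
		}
		i++;			/* local object stays in p[] */
	}

	if (size)
		flush("local", p, size);	/* sheaf fast path in the kernel */

flush_remote:
	if (remote_nr) {
		flush("remote", remote_objects, remote_nr);
		if (i < size) {
			/* Side buffer filled mid-scan: reset and resume. */
			remote_nr = 0;
			goto next_remote_batch;
		}
	}
}

int main(void)
{
	void *objs[] = {
		(void *)0x10, (void *)0x21, (void *)0x32, (void *)0x43,
		(void *)0x54, (void *)0x65, (void *)0x76, (void *)0x87,
		(void *)0x98, (void *)0xa9,
	};

	bulk_free(objs, sizeof(objs) / sizeof(objs[0]));
	return 0;
}

With five "remote" objects and BATCH_MAX of 4, the sketch exercises both paths: one mid-scan overflow flush followed by a resumed scan, then the final local and remote flushes. The swap-with-last compaction keeps the scan O(n) without preserving order, and the bounded side buffer caps stack usage regardless of how many remote objects the bulk free contains.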
