| author | Christoph Lameter <clameter@engr.sgi.com> | 2006-03-22 00:09:07 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-22 07:54:06 -0800 |
| commit | b18e7e654d7af741d2bf34a90dc34128d0217fea (patch) | |
| tree | 04cbc067da970b8b24c8b5d48a1e7d9520f92b06 /mm | |
| parent | 1b55253a7f95adc82eb20937b57b3e3e32ba65df (diff) | |
[PATCH] slab: fix drain_array() so that it works correctly with the shared_array
The list_lock also protects the shared array, and we call drain_array() with
the shared array. Therefore we cannot go as far as I wanted to, but have to
take the lock so that it also protects the array_cache inside drain_array().
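The rule the patch enforces is that every access to a potentially shared array_cache (the avail check, the tofree calculation, free_block() and the compacting memmove()) happens while the list lock is held. Below is a minimal user-space sketch of that ordering, with a pthread mutex standing in for l3->list_lock and a stubbed free_block(); the struct layout and helpers are simplified stand-ins for illustration, not the real slab internals.

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct array_cache; the real one lives in mm/slab.c. */
struct array_cache {
	unsigned int avail;     /* objects currently cached */
	unsigned int limit;     /* capacity of entry[] */
	int touched;            /* recently-used flag */
	void *entry[64];
};

/* Stands in for l3->list_lock; in the kernel this is a spinlock taken with irqs off. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub for free_block(): would hand the objects back to the slab lists. */
static void free_block(void **entries, unsigned int nr)
{
	(void)entries;
	(void)nr;
}

/* Everything that reads or writes the (possibly shared) array_cache is under the lock. */
static void drain_array_sketch(struct array_cache *ac, int force)
{
	unsigned int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
		return;
	}
	pthread_mutex_lock(&list_lock);
	if (ac->avail) {                /* re-checked under the lock */
		tofree = force ? ac->avail : (ac->limit + 4) / 5;
		if (tofree > ac->avail)
			tofree = (ac->avail + 1) / 2;
		free_block(ac->entry, tofree);
		ac->avail -= tofree;
		memmove(ac->entry, &ac->entry[tofree],
			sizeof(void *) * ac->avail);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct array_cache shared = { .avail = 10, .limit = 64, .touched = 0 };

	drain_array_sketch(&shared, 0);
	printf("avail after drain: %u\n", shared.avail);   /* expect 5 */
	return 0;
}
```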
(Note: maybe we should make the array_cache locking more consistent, i.e.
always take the array_cache lock for shared arrays and disable interrupts
for the per-CPU arrays?)
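As a purely hypothetical sketch of the scheme that note asks about (none of this is in the patch, and it assumes the array_cache carries its own spinlock that could serialize the shared arrays), the two disciplines would look roughly like:

```c
/*
 * Hypothetical illustration only, not part of this patch: shared arrays
 * guarded by a lock embedded in the array_cache itself, per-CPU arrays
 * guarded purely by disabling interrupts on the owning CPU.
 */
static void drain_shared_array(struct array_cache *shared)
{
	spin_lock(&shared->lock);	/* shared array: guarded by its own lock */
	/* ... trim shared->entry[] ... */
	spin_unlock(&shared->lock);
}

static void drain_percpu_array(struct array_cache *ac)
{
	local_irq_disable();		/* per-CPU array: only this CPU touches it */
	/* ... trim ac->entry[] ... */
	local_irq_enable();
}
```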
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab.c | 21 |
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 3274144c0d16..6b691ecbac44 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3521,7 +3521,8 @@ static void enable_cpucache(struct kmem_cache *cachep)
 
 /*
  * Drain an array if it contains any elements taking the l3 lock only if
- * necessary.
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
  */
 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
@@ -3532,16 +3533,18 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 		return;
 	if (ac->touched && !force) {
 		ac->touched = 0;
-	} else if (ac->avail) {
-		tofree = force ? ac->avail : (ac->limit + 4) / 5;
-		if (tofree > ac->avail)
-			tofree = (ac->avail + 1) / 2;
+	} else {
 		spin_lock_irq(&l3->list_lock);
-		free_block(cachep, ac->entry, tofree, node);
+		if (ac->avail) {
+			tofree = force ? ac->avail : (ac->limit + 4) / 5;
+			if (tofree > ac->avail)
+				tofree = (ac->avail + 1) / 2;
+			free_block(cachep, ac->entry, tofree, node);
+			ac->avail -= tofree;
+			memmove(ac->entry, &(ac->entry[tofree]),
+				sizeof(void *) * ac->avail);
+		}
 		spin_unlock_irq(&l3->list_lock);
-		ac->avail -= tofree;
-		memmove(ac->entry, &(ac->entry[tofree]),
-			sizeof(void *) * ac->avail);
 	}
 }
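For context on why the shared array matters here: drain_array() is driven from the periodic reaper for both kinds of array_cache, the per-CPU array (only ever touched by its own CPU with interrupts off) and the node's shared array (touched by any CPU that holds l3->list_lock). The sketch below models those call sites on the cache_reap() structure of this kernel era rather than quoting it verbatim; reap_one_cache() is a hypothetical wrapper name used only for the example.

```c
/*
 * Simplified sketch of the drain_array() callers, modeled on cache_reap()
 * of this kernel era (not a verbatim excerpt).  The shared array can be
 * hit concurrently by any CPU under l3->list_lock, which is why
 * drain_array() has to do all of its array_cache work under that lock.
 */
static void reap_one_cache(struct kmem_cache *searchp,
			   struct kmem_list3 *l3, int node)
{
	/* Per-CPU array: serialized by running on this CPU. */
	drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

	/* Shared per-node array: serialized only by l3->list_lock. */
	if (l3->shared)
		drain_array(searchp, l3, l3->shared, 0, node);
}
```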