author    | Nick Piggin <nickpiggin@yahoo.com.au>                | 2007-05-16 22:10:49 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-17 05:23:02 -0700
commit    | afc0cedbe9138e3e8b38bfa1e4dfd01a2c537d62 (patch)
tree      | 0df03f95645ef76a387dd541da062b682319c921 /mm/slob.c
parent    | b2cd64153b94473f6bd82448a68b8e8c041676ea (diff)
slob: implement RCU freeing
The SLOB allocator should implement SLAB_DESTROY_BY_RCU correctly, because
even on UP, RCU freeing semantics are not equivalent to simply freeing
immediately. This also allows SLOB to be used on SMP.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
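
For context, the sketch below shows how a SLAB_DESTROY_BY_RCU cache is typically consumed. It is a hypothetical illustration, not part of this patch: my_obj, my_cache, my_list and my_lookup are made-up names, written against the 2.6.21-era kmem_cache_create() signature. With this flag an object's memory may be reused for another object of the same type before a grace period ends, but it is never handed back to the page allocator in that window, so a reader inside rcu_read_lock() may dereference a stale pointer as long as it revalidates what it finds.

/*
 * Hypothetical consumer of a SLAB_DESTROY_BY_RCU cache (illustration
 * only; my_obj, my_cache, my_list, my_lookup are made-up names).
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <asm/atomic.h>

struct my_obj {
	int key;
	atomic_t refcnt;		/* 0 means "being torn down" */
	struct my_obj *next;
};

static struct my_obj *my_list;		/* updates serialized elsewhere */
static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/* no dtor: the patch BUG()s on dtor + SLAB_DESTROY_BY_RCU */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, SLAB_DESTROY_BY_RCU, NULL, NULL);
	return my_cache ? 0 : -ENOMEM;
}
module_init(my_cache_init);

static struct my_obj *my_lookup(int key)
{
	struct my_obj *p;

	rcu_read_lock();
	for (p = rcu_dereference(my_list); p; p = rcu_dereference(p->next)) {
		if (p->key != key)
			continue;
		/*
		 * The object may have been freed and reused for another
		 * my_obj, but it is still a my_obj: pin it, then recheck
		 * that it is the one we were looking for.
		 */
		if (!atomic_inc_not_zero(&p->refcnt))
			continue;
		if (p->key == key) {
			rcu_read_unlock();
			return p;
		}
		atomic_dec(&p->refcnt);		/* wrong object, drop it */
	}
	rcu_read_unlock();
	return NULL;
}

That guarantee is what simply freeing immediately would break: the patch therefore defers the real free through call_rcu() even on UP, instead of calling slob_free() straight from kmem_cache_free().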
Diffstat (limited to 'mm/slob.c')
-rw-r--r-- | mm/slob.c | 52
1 file changed, 45 insertions(+), 7 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19bcd..57bb72ed0d46 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/timer.h>
+#include <linux/rcupdate.h>
 
 struct slob_block {
 	int units;
@@ -53,6 +54,16 @@ struct bigblock {
 };
 typedef struct bigblock bigblock_t;
 
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+	struct rcu_head head;
+	int size;
+};
+
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
 static bigblock_t *bigblocks;
@@ -266,6 +277,7 @@ size_t ksize(const void *block)
 
 struct kmem_cache {
 	unsigned int size, align;
+	unsigned long flags;
 	const char *name;
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	void (*dtor)(void *, struct kmem_cache *, unsigned long);
@@ -283,6 +295,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	if (c) {
 		c->name = name;
 		c->size = size;
+		if (flags & SLAB_DESTROY_BY_RCU) {
+			BUG_ON(dtor);
+			/* leave room for rcu footer at the end of object */
+			c->size += sizeof(struct slob_rcu);
+		}
+		c->flags = flags;
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
@@ -328,15 +346,35 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
 {
-	if (c->dtor)
-		c->dtor(b, c, 0);
-
-	if (c->size < PAGE_SIZE)
-		slob_free(b, c->size);
+	if (size < PAGE_SIZE)
+		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(c->size));
+		free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+	__kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+		struct slob_rcu *slob_rcu;
+		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+		INIT_RCU_HEAD(&slob_rcu->head);
+		slob_rcu->size = c->size;
+		call_rcu(&slob_rcu->head, kmem_rcu_free);
+	} else {
+		if (c->dtor)
+			c->dtor(b, c, 0);
+		__kmem_cache_free(b, c->size);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free);
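
The footer arithmetic in the hunks above works because kmem_cache_create() grows c->size by sizeof(struct slob_rcu), kmem_cache_free() writes the RCU footer into the last bytes of the object, and kmem_rcu_free() walks back from the footer to the object's base. The standalone sketch below (plain userspace C, with a hypothetical fake_rcu_head stand-in for struct rcu_head; not kernel code) replays just that pointer arithmetic:

/*
 * Userspace illustration of the patch's footer trick: the rcu footer
 * occupies the last sizeof(struct slob_rcu) bytes of the enlarged
 * object, and the deferred-free callback recovers the object's base
 * address by walking back (size - sizeof(footer)) bytes.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_rcu_head { void (*func)(void *); };	/* stand-in for struct rcu_head */

struct slob_rcu {
	struct fake_rcu_head head;
	int size;			/* total object size, footer included */
};

int main(void)
{
	size_t payload = 100;					/* what the caller asked for */
	size_t size = payload + sizeof(struct slob_rcu);	/* what the cache allocates */
	void *b = malloc(size);

	/* kmem_cache_free() side: the footer lives at the tail of the object */
	struct slob_rcu *slob_rcu =
		(struct slob_rcu *)((char *)b + size - sizeof(struct slob_rcu));
	slob_rcu->size = size;

	/* kmem_rcu_free() side: recover the object start from the footer */
	void *recovered = (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
	assert(recovered == b);
	printf("object %p recovered from footer %p\n", b, (void *)slob_rcu);

	free(recovered);
	return 0;
}

Putting the rcu_head in a footer rather than a header keeps the object's start address, and therefore the pointer callers allocate and free, unchanged; only the cache's internal object size grows.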