From 264132bc62fe071d0ff378c1103bae9d33212f10 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 6 Mar 2006 12:10:07 -0800
Subject: Fix "check_slabp" printout size calculation

We want to use the "struct slab" size, not the size of the pointer to
same. As it is, we'd not print out the last <n> entry pointers in the
slab (where <n> is ~10, depending on whether it's a 32-bit or 64-bit
kernel).

Gaah, that slab code was written by somebody who likes unreadable crud.

Signed-off-by: Linus Torvalds
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index add05d808a4a..2b0b1519bb74 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2554,7 +2554,7 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 	       "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
 	       cachep->name, cachep->num, slabp, slabp->inuse);
 	for (i = 0;
-	     i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+	     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
 	     i++) {
 		if ((i % 16) == 0)
 			printk("\n%03x:", i);
--
cgit v1.2.3

From 9888e6fa7b68d9c8cc2c162a90979825ab45150a Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 6 Mar 2006 17:44:43 -0800
Subject: slab: clarify and fix calculate_slab_order()

If we triggered the 'offslab_limit' test, we would return with
cachep->gfporder incremented once too many times.

This clarifies the logic somewhat, and fixes that bug.

Signed-off-by: Linus Torvalds
---
 mm/slab.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 2b0b1519bb74..f2e92dc1c9ce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1628,36 +1628,36 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
 	size_t left_over = 0;
+	int gfporder;
 
-	for (;; cachep->gfporder++) {
+	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
 		unsigned int num;
 		size_t remainder;
 
-		if (cachep->gfporder > MAX_GFP_ORDER) {
-			cachep->num = 0;
-			break;
-		}
-
-		cache_estimate(cachep->gfporder, size, align, flags,
-			       &remainder, &num);
+		cache_estimate(gfporder, size, align, flags, &remainder, &num);
 		if (!num)
 			continue;
+
 		/* More than offslab_limit objects will cause problems */
-		if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
 			break;
 
+		/* Found something acceptable - save it away */
 		cachep->num = num;
+		cachep->gfporder = gfporder;
 		left_over = remainder;
 
 		/*
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
 		 */
-		if (cachep->gfporder >= slab_break_gfp_order)
+		if (gfporder >= slab_break_gfp_order)
 			break;
 
-		if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
-			/* Acceptable internal fragmentation */
+		/*
+		 * Acceptable internal fragmentation?
+		 */
+		if ((left_over * 8) <= (PAGE_SIZE << gfporder))
 			break;
 	}
 	return left_over;
--
cgit v1.2.3
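A note on the first fix above: in C, sizeof(slabp) is the size of the
pointer variable itself (4 bytes on a 32-bit kernel, 8 on a 64-bit one),
while sizeof(*slabp) is the size of the struct slab it points to, so the
old loop bound cut the hexdump short by roughly ten entries' worth of
bytes. A minimal userspace sketch of the pitfall follows; it is
illustration only, and struct slab_like is a made-up stand-in for the
kernel's struct slab, not the real layout.

#include <stdio.h>

/* Made-up stand-in for the kernel's struct slab (illustration only). */
struct slab_like {
	unsigned int inuse;		/* objects in use */
	unsigned int free;		/* index of the first free object */
	void *s_mem;			/* address of the first object */
	unsigned long colouroff;	/* colour offset */
};

int main(void)
{
	struct slab_like s = { 0 };
	struct slab_like *slabp = &s;

	/* The pointer: 4 or 8 bytes depending on the build. */
	printf("sizeof(slabp)  = %zu\n", sizeof(slabp));
	/* The struct itself: what the hexdump bound actually needs. */
	printf("sizeof(*slabp) = %zu\n", sizeof(*slabp));
	return 0;
}

On a typical 64-bit build this prints 8 and 24: exactly the kind of gap
the commit message describes.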
From f78bb8ad482267b92c122f0e37a7dce69c880247 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Wed, 8 Mar 2006 10:33:05 -0800
Subject: slab: fix calculate_slab_order() for SLAB_RECLAIM_ACCOUNT

Instead of having a hard-to-read and confusing conditional in the
caller, just make the slab order calculation handle this special case,
since it's simple and obvious there.

Signed-off-by: Linus Torvalds
---
 mm/slab.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index f2e92dc1c9ce..6ad6bd5a0b3e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1647,6 +1647,14 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
 		cachep->gfporder = gfporder;
 		left_over = remainder;
 
+		/*
+		 * A VFS-reclaimable slab tends to have most allocations
+		 * as GFP_NOFS and we really don't want to have to be allocating
+		 * higher-order pages when we are unable to shrink dcache.
+		 */
+		if (flags & SLAB_RECLAIM_ACCOUNT)
+			break;
+
 		/*
 		 * Large number of objects is good, but very large slabs are
 		 * currently bad for the gfp()s.
@@ -1869,17 +1877,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	size = ALIGN(size, align);
 
-	if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
-		/*
-		 * A VFS-reclaimable slab tends to have most allocations
-		 * as GFP_NOFS and we really don't want to have to be allocating
-		 * higher-order pages when we are unable to shrink dcache.
-		 */
-		cachep->gfporder = 0;
-		cache_estimate(cachep->gfporder, size, align, flags,
-			       &left_over, &cachep->num);
-	} else
-		left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, align, flags);
 
 	if (!cachep->num) {
 		printk("kmem_cache_create: couldn't create cache %s.\n", name);
--
cgit v1.2.3

From 07ed76b2a085a31f427c2a912a562627947dc7de Mon Sep 17 00:00:00 2001
From: Jack Steiner
Date: Tue, 7 Mar 2006 21:55:46 -0800
Subject: [PATCH] slab: allocate larger cache_cache if order 0 fails

kmem_cache_init() incorrectly assumes that the cache_cache object will
fit in an order 0 allocation. On very large systems, this is not true.
Change the code to try larger order allocations if order 0 fails.

Signed-off-by: Jack Steiner
Cc: Manfred Spraul
Cc: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 6ad6bd5a0b3e..61800b88e241 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void)
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
+	int order;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void)
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
 
-	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
-		       &left_over, &cache_cache.num);
+	for (order = 0; order < MAX_ORDER; order++) {
+		cache_estimate(order, cache_cache.buffer_size,
+			cache_line_size(), 0, &left_over, &cache_cache.num);
+		if (cache_cache.num)
+			break;
+	}
 	if (!cache_cache.num)
 		BUG();
-
+	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
--
cgit v1.2.3
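After these patches, calculate_slab_order() and the kmem_cache_init()
fix share the same shape: walk the page orders upward from zero, ask the
estimator how many objects fit, and stop at the first acceptable order.
The sketch below mirrors that pattern in plain userspace C; it is
illustration only, and PAGE_SIZE_SIM, MAX_ORDER_SIM and estimate() are
simplified stand-ins for the kernel's PAGE_SIZE, MAX_ORDER and
cache_estimate() (the real estimator also accounts for alignment and
on-slab management overhead).

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE_SIM	4096UL	/* stand-in for PAGE_SIZE */
#define MAX_ORDER_SIM	11	/* stand-in for MAX_ORDER */

/* Crude stand-in for cache_estimate(): how many objects of @size fit
 * in a block of 2^order pages, and how many bytes are left over. */
static void estimate(int order, size_t size,
		     size_t *left_over, unsigned int *num)
{
	size_t total = PAGE_SIZE_SIM << order;

	*num = total / size;
	*left_over = total - *num * size;
}

int main(void)
{
	size_t size = 70000;	/* object too large for one order-0 page */
	size_t left_over = 0;
	unsigned int num = 0;
	int order;

	for (order = 0; order < MAX_ORDER_SIM; order++) {
		estimate(order, size, &left_over, &num);
		if (num)		/* first order that fits an object */
			break;
	}
	if (!num)
		return 1;		/* the kernel would BUG() here */

	printf("order %d: %u object(s), %zu bytes left over\n",
	       order, num, left_over);
	return 0;
}

For a 70000-byte object this settles on order 5 (32 pages, 131072
bytes), leaving 61072 bytes over. Note the detail the second patch
fixed: the search keeps the winning order in a local variable and only
copies it into the cache on success, so a failed probe can no longer
leave a half-incremented gfporder behind.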