author     David Rientjes <rientjes@google.com>      2009-02-22 17:40:07 -0800
committer  Pekka Enberg <penberg@cs.helsinki.fi>     2009-02-23 12:05:41 +0200
commit     3b89d7d881a1dbb4da158f7eb5d6b3ceefc72810
tree       48c119937a204172677a5fa3a829019890670350 /include/linux
parent     b578f3fcca1e78624dfb5f358776e63711d7fda2
slub: move min_partial to struct kmem_cache
Although it allows for better cacheline use, it is unnecessary to save a
copy of the cache's min_partial value in each kmem_cache_node.
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/slub_def.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aacd..f20a89e4d52c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,7 +46,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +88,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
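The practical effect of the move is that partial-list trimming can consult one cache-wide threshold instead of a copy duplicated in every kmem_cache_node. The mm/slub.c side of the change is not shown here (the diffstat is limited to include/linux), so the following is only a minimal, self-contained sketch of that idea; the simplified structures and the helper name can_free_partial are illustrative, not the actual kernel definitions.

#include <stdio.h>

struct kmem_cache_node {
	unsigned long nr_partial;	/* per-node partial-list length; no min_partial copy */
};

struct kmem_cache {
	unsigned long min_partial;		/* single cache-wide threshold */
	struct kmem_cache_node *node[2];	/* two pretend NUMA nodes */
};

/* A node may give back partial slabs only while it holds more than min_partial of them. */
static int can_free_partial(const struct kmem_cache *s, const struct kmem_cache_node *n)
{
	return n->nr_partial > s->min_partial;
}

int main(void)
{
	struct kmem_cache_node n0 = { .nr_partial = 8 };
	struct kmem_cache_node n1 = { .nr_partial = 2 };
	struct kmem_cache s = { .min_partial = 5, .node = { &n0, &n1 } };

	printf("node0 can free: %d\n", can_free_partial(&s, s.node[0]));	/* prints 1 */
	printf("node1 can free: %d\n", can_free_partial(&s, s.node[1]));	/* prints 0 */
	return 0;
}

In the sketch only node0 may shrink its partial list, since it alone holds more than min_partial slabs; every node reads the same s->min_partial rather than a per-node duplicate, which is the point of the patch.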