author     Christoph Lameter <clameter@sgi.com>                   2007-05-09 02:32:44 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-09 12:30:45 -0700
commit     41ecc55b8a8ca618e6d490982c7ce45d230d4399 (patch)
tree       ba2f416a2ea92eb335cae028c586b3cd3797d66c
parent     02cbc874463924de2c3403379b698bce3cd277a5 (diff)
SLUB: add CONFIG_SLUB_DEBUG
CONFIG_SLUB_DEBUG can be used to switch off the debugging and sysfs components
of SLUB. Thus SLUB will be able to replace SLOB. SLUB can arrange objects in a
denser way than SLOB and the code size should be minimal without debugging and
sysfs support.

Note that CONFIG_SLUB_DEBUG is materially different from CONFIG_SLAB_DEBUG.
CONFIG_SLAB_DEBUG is used to enable slab debugging in SLAB. SLUB enables
debugging via a boot parameter. SLUB debug code should always be present.
CONFIG_SLUB_DEBUG can be modified in the embedded config section.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
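The boot parameter behaviour referred to above can be read off the setup_slub_debug()
parser added further down in this patch. As an illustration only (the "dentry" cache
name below is an example, not something this patch introduces), the accepted forms are:

    slub_debug              enable DEBUG_DEFAULT_FLAGS for all caches
    slub_debug=FZPUT        enable SLAB_DEBUG_FREE (F), SLAB_RED_ZONE (Z),
                            SLAB_POISON (P), SLAB_STORE_USER (U) and
                            SLAB_TRACE (T) for all caches
    slub_debug=ZP,dentry    enable red zoning and poisoning only for caches
                            whose name starts with "dentry"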
-rw-r--r--  init/Kconfig  |   9
-rw-r--r--  mm/slub.c     | 189
2 files changed, 123 insertions(+), 75 deletions(-)
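As a sketch of the intended embedded use (assuming standard Kconfig semantics; the
fragment below is illustrative and not part of the patch), the new prompt is only
offered when CONFIG_EMBEDDED is enabled and defaults to y, so a size-sensitive
configuration could end up with:

    CONFIG_EMBEDDED=y
    CONFIG_SLUB=y
    # CONFIG_SLUB_DEBUG is not set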
diff --git a/init/Kconfig b/init/Kconfig
index d0edf42f4dba..da6a91c4a051 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -504,6 +504,15 @@ config VM_EVENT_COUNTERS
	  on EMBEDDED systems. /proc/vmstat will only show page counts
	  if VM event counters are disabled.

+config SLUB_DEBUG
+	default y
+	bool "Enable SLUB debugging support" if EMBEDDED
+	help
+	  SLUB has extensive debug support features. Disabling these can
+	  result in significant savings in code size. This also disables
+	  SLUB sysfs support. /sys/slab will not exist and there will be
+	  no support for cache validation etc.
+
choice
	prompt "Choose SLAB allocator"
	default SLAB
diff --git a/mm/slub.c b/mm/slub.c
index b9e05366afda..3e614c1901b7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -89,17 +89,25 @@
static inline int SlabDebug(struct page *page)
{
+#ifdef CONFIG_SLUB_DEBUG
return PageError(page);
+#else
+ return 0;
+#endif
}
static inline void SetSlabDebug(struct page *page)
{
+#ifdef CONFIG_SLUB_DEBUG
SetPageError(page);
+#endif
}
static inline void ClearSlabDebug(struct page *page)
{
+#ifdef CONFIG_SLUB_DEBUG
ClearPageError(page);
+#endif
}
/*
@@ -207,7 +215,7 @@ struct track {
enum track_item { TRACK_ALLOC, TRACK_FREE };
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
@@ -284,6 +292,14 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
+#ifdef CONFIG_SLUB_DEBUG
+/*
+ * Debug settings:
+ */
+static int slub_debug;
+
+static char *slub_debug_slabs;
+
/*
* Object debugging
*/
@@ -821,6 +837,97 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
}
}
+static int __init setup_slub_debug(char *str)
+{
+ if (!str || *str != '=')
+ slub_debug = DEBUG_DEFAULT_FLAGS;
+ else {
+ str++;
+ if (*str == 0 || *str == ',')
+ slub_debug = DEBUG_DEFAULT_FLAGS;
+ else
+ for( ;*str && *str != ','; str++)
+ switch (*str) {
+ case 'f' : case 'F' :
+ slub_debug |= SLAB_DEBUG_FREE;
+ break;
+ case 'z' : case 'Z' :
+ slub_debug |= SLAB_RED_ZONE;
+ break;
+ case 'p' : case 'P' :
+ slub_debug |= SLAB_POISON;
+ break;
+ case 'u' : case 'U' :
+ slub_debug |= SLAB_STORE_USER;
+ break;
+ case 't' : case 'T' :
+ slub_debug |= SLAB_TRACE;
+ break;
+ default:
+ printk(KERN_ERR "slub_debug option '%c' "
+ "unknown. skipped\n",*str);
+ }
+ }
+
+ if (*str == ',')
+ slub_debug_slabs = str + 1;
+ return 1;
+}
+
+__setup("slub_debug", setup_slub_debug);
+
+static void kmem_cache_open_debug_check(struct kmem_cache *s)
+{
+ /*
+ * The page->offset field is only 16 bit wide. This is an offset
+ * in units of words from the beginning of an object. If the slab
+ * size is bigger then we cannot move the free pointer behind the
+ * object anymore.
+ *
+ * On 32 bit platforms the limit is 256k. On 64bit platforms
+ * the limit is 512k.
+ *
+ * Debugging or ctor/dtors may create a need to move the free
+ * pointer. Fail if this happens.
+ */
+ if (s->size >= 65535 * sizeof(void *)) {
+ BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
+ SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
+ BUG_ON(s->ctor || s->dtor);
+ }
+ else
+ /*
+ * Enable debugging if selected on the kernel commandline.
+ */
+ if (slub_debug && (!slub_debug_slabs ||
+ strncmp(slub_debug_slabs, s->name,
+ strlen(slub_debug_slabs)) == 0))
+ s->flags |= slub_debug;
+}
+#else
+
+static inline int alloc_object_checks(struct kmem_cache *s,
+ struct page *page, void *object) { return 0; }
+
+static inline int free_object_checks(struct kmem_cache *s,
+ struct page *page, void *object) { return 0; }
+
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void trace(struct kmem_cache *s, struct page *page,
+ void *object, int alloc) {}
+static inline void init_object(struct kmem_cache *s,
+ void *object, int active) {}
+static inline void init_tracking(struct kmem_cache *s, void *object) {}
+static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
+ { return 1; }
+static inline int check_object(struct kmem_cache *s, struct page *page,
+ void *object, int active) { return 1; }
+static inline void set_track(struct kmem_cache *s, void *object,
+ enum track_item alloc, void *addr) {}
+static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
+#define slub_debug 0
+#endif
/*
* Slab allocation and freeing
*/
@@ -1446,13 +1553,6 @@ static int slub_min_objects = DEFAULT_MIN_OBJECTS;
static int slub_nomerge;
/*
- * Debug settings:
- */
-static int slub_debug;
-
-static char *slub_debug_slabs;
-
-/*
* Calculate the order of allocation given an slab object size.
*
* The order of allocation has significant impact on performance and other
@@ -1660,6 +1760,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
size = ALIGN(size, sizeof(void *));
+#ifdef CONFIG_SLUB_DEBUG
/*
* If we are Redzoning then check if there is some space between the
* end of the object and the free pointer. If not then add an
@@ -1667,6 +1768,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
if ((flags & SLAB_RED_ZONE) && size == s->objsize)
size += sizeof(void *);
+#endif
/*
* With that we have determined the number of bytes in actual use
@@ -1674,6 +1776,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
+#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
s->ctor || s->dtor)) {
/*
@@ -1704,6 +1807,7 @@ static int calculate_sizes(struct kmem_cache *s)
* of the object.
*/
size += sizeof(void *);
+#endif
/*
* Determine the alignment based on various parameters that the
@@ -1753,32 +1857,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
s->objsize = size;
s->flags = flags;
s->align = align;
-
- /*
- * The page->offset field is only 16 bit wide. This is an offset
- * in units of words from the beginning of an object. If the slab
- * size is bigger then we cannot move the free pointer behind the
- * object anymore.
- *
- * On 32 bit platforms the limit is 256k. On 64bit platforms
- * the limit is 512k.
- *
- * Debugging or ctor/dtors may create a need to move the free
- * pointer. Fail if this happens.
- */
- if (s->size >= 65535 * sizeof(void *)) {
- BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
- SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
- BUG_ON(ctor || dtor);
- }
- else
- /*
- * Enable debugging if selected on the kernel commandline.
- */
- if (slub_debug && (!slub_debug_slabs ||
- strncmp(slub_debug_slabs, name,
- strlen(slub_debug_slabs)) == 0))
- s->flags |= slub_debug;
+ kmem_cache_open_debug_check(s);
if (!calculate_sizes(s))
goto error;
@@ -1949,45 +2028,6 @@ static int __init setup_slub_nomerge(char *str)
__setup("slub_nomerge", setup_slub_nomerge);
-static int __init setup_slub_debug(char *str)
-{
- if (!str || *str != '=')
- slub_debug = DEBUG_DEFAULT_FLAGS;
- else {
- str++;
- if (*str == 0 || *str == ',')
- slub_debug = DEBUG_DEFAULT_FLAGS;
- else
- for( ;*str && *str != ','; str++)
- switch (*str) {
- case 'f' : case 'F' :
- slub_debug |= SLAB_DEBUG_FREE;
- break;
- case 'z' : case 'Z' :
- slub_debug |= SLAB_RED_ZONE;
- break;
- case 'p' : case 'P' :
- slub_debug |= SLAB_POISON;
- break;
- case 'u' : case 'U' :
- slub_debug |= SLAB_STORE_USER;
- break;
- case 't' : case 'T' :
- slub_debug |= SLAB_TRACE;
- break;
- default:
- printk(KERN_ERR "slub_debug option '%c' "
- "unknown. skipped\n",*str);
- }
- }
-
- if (*str == ',')
- slub_debug_slabs = str + 1;
- return 1;
-}
-
-__setup("slub_debug", setup_slub_debug);
-
static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
const char *name, int size, gfp_t gfp_flags)
{
@@ -2554,8 +2594,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return slab_alloc(s, gfpflags, node, caller);
}
-#ifdef CONFIG_SYSFS
-
+#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page)
{
void *p;