author    Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 12:44:30 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-22 12:44:30 -0700
commit    f99b7880cb9863e11441bd8b2f31d4f556ef1a44 (patch)
tree      6f3dc6e33e847b431dd899bd968d799f0d4a8fff /include
parent    02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
parent    7ea466f2256b02a7047dfd47d76a2f6c1e427e3e (diff)
Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slab: fix DEBUG_SLAB warning
  slab: shrink sizeof(struct kmem_cache)
  slab: fix DEBUG_SLAB build
  SLUB: Fix missing <linux/stacktrace.h> include
  slub: reduce overhead of slub_debug
  slub: Add method to verify memory is not freed
  slub: Enable backtrace for create/delete points
  slab allocators: Provide generic description of alignment defines
  slab, slub, slob: Unify alignment definition
  slob/lockdep: Fix gfp flags passed to lockdep
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab.h      | 20
-rw-r--r--  include/linux/slab_def.h  | 52
-rw-r--r--  include/linux/slob_def.h  | 10
-rw-r--r--  include/linux/slub_def.h  | 23
4 files changed, 46 insertions(+), 59 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ad4dd1c8d30a..573c809c33d9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -134,6 +134,26 @@ unsigned int kmem_cache_size(struct kmem_cache *);
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
* Common kmalloc functions provided by all allocators
*/
void * __must_check __krealloc(const void *, size_t, gfp_t);
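
With this hunk, all three allocators share one definition of the kmalloc() minimum alignment: ARCH_DMA_MINALIGN when the arch header provides it, otherwise the alignment of a 64-bit integer (the slob_def.h hunk below drops its weaker __alignof__(unsigned long) variant in the process). A minimal standalone sketch of how the fallback resolves; the ARCH_DMA_MINALIGN value of 64 is an assumed example, not taken from this patch:

/* Hedged sketch: how the unified minimum-alignment fallback resolves.
 * Pretend the arch header defined ARCH_DMA_MINALIGN as 64 (a plausible
 * cache-line size); without it, both macros fall back to the alignment
 * of a 64-bit integer (typically 8). */
#include <stdio.h>

#define ARCH_DMA_MINALIGN 64	/* assumed arch-provided value */

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

int main(void)
{
	printf("ARCH_KMALLOC_MINALIGN = %d\n", (int)ARCH_KMALLOC_MINALIGN);
	printf("ARCH_SLAB_MINALIGN    = %d\n", (int)ARCH_SLAB_MINALIGN);
	return 0;
}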
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 83203ae9390b..d00e0bacda93 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -18,53 +18,25 @@
#include <trace/events/kmem.h>
/*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
-/*
* struct kmem_cache
*
* manages a cache.
*/
struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
- struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
+/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
unsigned int buffer_size;
u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;
@@ -80,11 +52,11 @@ struct kmem_cache {
/* constructor func */
void (*ctor)(void *obj);
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
const char *name;
struct list_head next;
-/* 6) statistics */
+/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
unsigned long num_active;
unsigned long num_allocations;
@@ -111,16 +83,18 @@ struct kmem_cache {
int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
+/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
- * We put nodelists[] at the end of kmem_cache, because we want to size
- * this array to nr_node_ids slots instead of MAX_NUMNODES
+ * We put array[] at the end of kmem_cache, because we want to size
+ * this array to nr_cpu_ids slots instead of NR_CPUS
* (see kmem_cache_init())
- * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of nodes.
+ * We still use [NR_CPUS] and not [1] or [0] because cache_cache
+ * is statically defined, so we reserve the max number of cpus.
*/
- struct kmem_list3 *nodelists[MAX_NUMNODES];
+ struct kmem_list3 **nodelists;
+ struct array_cache *array[NR_CPUS];
/*
- * Do not add fields after nodelists[]
+ * Do not add fields after array[]
*/
};
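
The struct reorganization above moves array[] to the tail so kmem_cache_init() can size it to nr_cpu_ids slots instead of the compile-time NR_CPUS maximum, which is where the "shrink sizeof(struct kmem_cache)" win comes from. A userspace sketch of the same tail-array sizing trick; the names and values are illustrative, not the kernel implementation:

/* Hedged sketch of the tail-array sizing trick: declare the array at
 * the maximum compile-time size, but allocate only as many slots as
 * are really needed. Illustrative userspace code only. */
#include <stdlib.h>
#include <stddef.h>

#define NR_CPUS 4096			/* compile-time maximum */

struct cache {
	unsigned int batchcount;	/* ...leading fixed fields... */
	void *array[NR_CPUS];		/* must stay last, like array[] above */
};

static struct cache *cache_create(int nr_cpu_ids)
{
	/* Size the allocation by the tail array's real length, not
	 * NR_CPUS: offsetof() gives the fixed prefix, and only
	 * nr_cpu_ids pointer slots follow it. */
	size_t size = offsetof(struct cache, array) +
		      nr_cpu_ids * sizeof(void *);

	return calloc(1, size);
}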
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 4382db09df4f..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,16 +1,6 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c8668d161dd8..4b35c06dfbc5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -113,16 +113,6 @@ struct kmem_cache {
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
/*
* Maximum kmalloc object size handled by SLUB. Larger object allocations
* are passed through to the page allocator. The page allocator "fastpath"
@@ -228,6 +218,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
return ret;
}
+/**
+ * Calling this on allocated memory will check that the memory
+ * is expected to be in use, and print warnings if not.
+ */
+#ifdef CONFIG_SLUB_DEBUG
+extern bool verify_mem_not_deleted(const void *x);
+#else
+static inline bool verify_mem_not_deleted(const void *x)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
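
The new verify_mem_not_deleted() lets SLUB debug builds check that a pointer still refers to live allocated memory, and compiles away to a constant true when CONFIG_SLUB_DEBUG is off. A hedged usage sketch; the caller, struct, and header choice are invented for illustration, and only verify_mem_not_deleted() comes from this patch:

/* Hedged usage sketch: guard a long-lived pointer with the new SLUB
 * debug check. Everything except verify_mem_not_deleted() is invented
 * for illustration. */
#include <linux/slab.h>

struct my_obj;				/* hypothetical type */

static void touch_object(struct my_obj *obj)	/* hypothetical caller */
{
	/* Under CONFIG_SLUB_DEBUG this checks the object's slab state
	 * and warns if it was already freed; otherwise it is the inline
	 * stub above and always returns true. */
	if (!verify_mem_not_deleted(obj))
		return;

	/* ... safe to use obj here on a debug build ... */
}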