Diffstat (limited to 'include/linux/slub_def.h')
 include/linux/slub_def.h | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0764c829d967..124270df8734 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,7 +16,9 @@ struct kmem_cache_node {
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
struct list_head full;
+#endif
};
/*
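With the new CONFIG_SLUB_DEBUG guard, the per-node list of fully allocated slabs only exists in debug builds, so any code that touches it has to be compiled out otherwise. A minimal sketch of that pattern (add_full() here is illustrative, not necessarily the helper mm/slub.c uses):

#ifdef CONFIG_SLUB_DEBUG
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	/* track slabs with no free objects for debugging/validation */
	list_add(&page->lru, &n->full);
}
#else
/* without SLUB debugging there is no full list to maintain */
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
#endif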
@@ -28,7 +30,7 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
- unsigned int order;
+ int order;
/*
* Avoid an extra cache line for UP, SMP and for the node local to
@@ -44,7 +46,9 @@ struct kmem_cache {
int align; /* Alignment */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
struct kobject kobj; /* For sysfs */
+#endif
#ifdef CONFIG_NUMA
int defrag_ratio;
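The same trick applies to the embedded kobject: with no sysfs presence in non-debug builds, the registration path needs a no-op fallback. A hedged sketch (sysfs_slab_add() exists in mm/slub.c, but the body below is simplified; the real setup also names the kobject and picks a parent):

#ifdef CONFIG_SLUB_DEBUG
static int sysfs_slab_add(struct kmem_cache *s)
{
	return kobject_add(&s->kobj);
}
#else
static inline int sysfs_slab_add(struct kmem_cache *s)
{
	return 0;	/* no sysfs entry without SLUB debugging */
}
#endif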
@@ -56,7 +60,13 @@ struct kmem_cache {
/*
* Kmalloc subsystem.
*/
-#define KMALLOC_SHIFT_LOW 3
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#else
+#define KMALLOC_MIN_SIZE 8
+#endif
+
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
/*
* We keep the general caches in an array of slab caches that are used for
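KMALLOC_SHIFT_LOW is no longer hard-coded to 3: an architecture that defines ARCH_KMALLOC_MINALIGN above 8 bytes now raises the smallest general cache to match, and ilog2() turns that size back into a shift. A self-contained illustration of the arithmetic (ILOG2 stands in for the kernel's ilog2(); the 64-byte alignment is an assumed example, typical of DMA-incoherent platforms):

#include <stdio.h>

/* stand-in for the kernel's ilog2() on constant powers of two */
#define ILOG2(n)	(31 - __builtin_clz(n))

int main(void)
{
	/* default: no ARCH_KMALLOC_MINALIGN, so KMALLOC_MIN_SIZE == 8 */
	printf("KMALLOC_SHIFT_LOW = %d\n", ILOG2(8));	/* prints 3 */

	/* assumed ARCH_KMALLOC_MINALIGN == 64: smallest cache is kmalloc-64 */
	printf("KMALLOC_SHIFT_LOW = %d\n", ILOG2(64));	/* prints 6 */
	return 0;
}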
@@ -70,15 +80,15 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
*/
static inline int kmalloc_index(size_t size)
{
- /*
- * We should return 0 if size == 0 but we use the smallest object
- * here for SLAB legacy reasons.
- */
- WARN_ON_ONCE(size == 0);
+ if (!size)
+ return 0;
if (size > KMALLOC_MAX_SIZE)
return -1;
+ if (size <= KMALLOC_MIN_SIZE)
+ return KMALLOC_SHIFT_LOW;
+
if (size > 64 && size <= 96)
return 1;
if (size > 128 && size <= 192)
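kmalloc_index() now maps size 0 to index 0 (no cache; the kmalloc() wrapper further down turns that into ZERO_SIZE_PTR) and clamps anything at or below KMALLOC_MIN_SIZE to the smallest cache. A toy model of the resulting mapping, assuming the default KMALLOC_MIN_SIZE of 8 (the loop's upper bound is illustrative):

#include <assert.h>

static int model_index(unsigned long size)
{
	int i;

	if (!size)
		return 0;		/* caller substitutes ZERO_SIZE_PTR */
	if (size <= 8)
		return 3;		/* KMALLOC_SHIFT_LOW */
	if (size > 64 && size <= 96)
		return 1;		/* odd-sized kmalloc-96 cache */
	if (size > 128 && size <= 192)
		return 2;		/* odd-sized kmalloc-192 cache */
	for (i = 4; i <= 25; i++)	/* power-of-two buckets */
		if (size <= (1UL << i))
			return i;
	return -1;			/* beyond KMALLOC_MAX_SIZE */
}

int main(void)
{
	assert(model_index(0) == 0);
	assert(model_index(8) == 3);
	assert(model_index(96) == 1);
	assert(model_index(200) == 8);	/* rounds up to kmalloc-256 */
	return 0;
}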
@@ -150,37 +160,28 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
-#define SLUB_DMA 0
+#define SLUB_DMA (__force gfp_t)0
#endif
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return kmem_cache_alloc(s, flags);
} else
return __kmalloc(size, flags);
}
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
-
- if (!s)
- return NULL;
-
- return kmem_cache_zalloc(s, flags);
- } else
- return __kzalloc(size, flags);
-}
-
#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
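Two things happen in the hunk above. First, zero-size requests now yield ZERO_SIZE_PTR rather than NULL: a small non-NULL constant (((void *)16) in slab.h) that passes allocation-failure checks but faults on dereference, and that kfree() knows to ignore. A usage sketch:

	void *p = kmalloc(0, GFP_KERNEL);	/* no cache for size 0 */

	/* p == ZERO_SIZE_PTR: not NULL, so error paths don't trigger,
	 * but any dereference traps */
	kfree(p);				/* silently accepted */

Second, the SLUB-private kzalloc() is gone; presumably a single allocator-independent definition takes over. A minimal sketch of such a wrapper, assuming the allocators honor __GFP_ZERO:

static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);	/* allocator zeroes the object */
}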
@@ -188,7 +189,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return kmem_cache_alloc_node(s, flags, node);
} else
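kmalloc_node() gets the same ZERO_SIZE_PTR treatment for constant zero-size requests. A caller-side sketch (struct foo is hypothetical):

	struct foo *f;

	/* allocate on or near the requesting node */
	f = kmalloc_node(sizeof(*f), GFP_KERNEL, numa_node_id());
	if (!f)
		return -ENOMEM;	/* genuine failure; size 0 would have returned ZERO_SIZE_PTR */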