Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  158
1 file changed, 105 insertions(+), 53 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 9a90b00d2f91..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -102,11 +102,12 @@
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
+#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
@@ -178,13 +179,13 @@
SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS)
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
- SLAB_DEBUG_OBJECTS)
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#endif
/*
@@ -303,6 +304,12 @@ struct kmem_list3 {
};
/*
+ * The slab allocator is initialized with interrupts disabled. Therefore, make
+ * sure early boot allocations don't accidentally enable interrupts.
+ */
+static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
+
+/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -315,7 +322,7 @@ static int drain_freelist(struct kmem_cache *cache,
struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
-static int enable_cpucache(struct kmem_cache *cachep);
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);
/*
@@ -752,6 +759,7 @@ static enum {
NONE,
PARTIAL_AC,
PARTIAL_L3,
+ EARLY,
FULL
} g_cpucache_up;
@@ -760,7 +768,7 @@ static enum {
*/
int slab_is_available(void)
{
- return g_cpucache_up == FULL;
+ return g_cpucache_up >= EARLY;
}
static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -958,12 +966,20 @@ static void __cpuinit start_cpu_timer(int cpu)
}
static struct array_cache *alloc_arraycache(int node, int entries,
- int batchcount)
+ int batchcount, gfp_t gfp)
{
int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *nc = NULL;
- nc = kmalloc_node(memsize, GFP_KERNEL, node);
+ nc = kmalloc_node(memsize, gfp, node);
+ /*
+ * The array_cache structures contain pointers to free objects.
+ * However, when such objects are allocated or transferred to another
+ * cache, the pointers are not cleared and they could be counted as
+ * valid references during a kmemleak scan. Therefore, kmemleak must
+ * not scan such objects.
+ */
+ kmemleak_no_scan(nc);
if (nc) {
nc->avail = 0;
nc->limit = entries;
@@ -1003,7 +1019,7 @@ static int transfer_objects(struct array_cache *to,
#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
return (struct array_cache **)BAD_ALIEN_MAGIC;
}
@@ -1034,7 +1050,7 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
struct array_cache **ac_ptr;
int memsize = sizeof(void *) * nr_node_ids;
@@ -1042,14 +1058,14 @@ static struct array_cache **alloc_alien_cache(int node, int limit)
if (limit > 1)
limit = 12;
- ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
+ ac_ptr = kmalloc_node(memsize, gfp, node);
if (ac_ptr) {
for_each_node(i) {
if (i == node || !node_online(i)) {
ac_ptr[i] = NULL;
continue;
}
- ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+ ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
if (!ac_ptr[i]) {
for (i--; i >= 0; i--)
kfree(ac_ptr[i]);
@@ -1282,20 +1298,20 @@ static int __cpuinit cpuup_prepare(long cpu)
struct array_cache **alien = NULL;
nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
+ cachep->batchcount, GFP_KERNEL);
if (!nc)
goto bad;
if (cachep->shared) {
shared = alloc_arraycache(node,
cachep->shared * cachep->batchcount,
- 0xbaadf00d);
+ 0xbaadf00d, GFP_KERNEL);
if (!shared) {
kfree(nc);
goto bad;
}
}
if (use_alien_caches) {
- alien = alloc_alien_cache(node, cachep->limit);
+ alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
if (!alien) {
kfree(shared);
kfree(nc);
@@ -1399,10 +1415,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
{
struct kmem_list3 *ptr;
- ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
+ ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
BUG_ON(!ptr);
- local_irq_disable();
memcpy(ptr, list, sizeof(struct kmem_list3));
/*
* Do not assume that spinlocks can be initialized via memcpy:
@@ -1411,7 +1426,6 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->nodelists[nodeid] = ptr;
- local_irq_enable();
}
/*
@@ -1575,9 +1589,8 @@ void __init kmem_cache_init(void)
{
struct array_cache *ptr;
- ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+ ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- local_irq_disable();
BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
@@ -1587,11 +1600,9 @@ void __init kmem_cache_init(void)
spin_lock_init(&ptr->lock);
cache_cache.array[smp_processor_id()] = ptr;
- local_irq_enable();
- ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+ ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
- local_irq_disable();
BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
!= &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
@@ -1603,7 +1614,6 @@ void __init kmem_cache_init(void)
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
- local_irq_enable();
}
/* 5) Replace the bootstrap kmem_list3's */
{
@@ -1622,19 +1632,27 @@ void __init kmem_cache_init(void)
}
}
- /* 6) resize the head arrays to their final sizes */
- {
- struct kmem_cache *cachep;
- mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
- if (enable_cpucache(cachep))
- BUG();
- mutex_unlock(&cache_chain_mutex);
- }
+ g_cpucache_up = EARLY;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+ struct kmem_cache *cachep;
+ /*
+ * Interrupts are enabled now so all GFP allocations are safe.
+ */
+ slab_gfp_mask = __GFP_BITS_MASK;
+
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+ if (enable_cpucache(cachep, GFP_NOWAIT))
+ BUG();
+ mutex_unlock(&cache_chain_mutex);
/* Done! */
g_cpucache_up = FULL;
@@ -2064,10 +2082,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
return left_over;
}
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (g_cpucache_up == FULL)
- return enable_cpucache(cachep);
+ return enable_cpucache(cachep, gfp);
if (g_cpucache_up == NONE) {
/*
@@ -2089,7 +2107,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
g_cpucache_up = PARTIAL_AC;
} else {
cachep->array[smp_processor_id()] =
- kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
+ kmalloc(sizeof(struct arraycache_init), gfp);
if (g_cpucache_up == PARTIAL_AC) {
set_up_list3s(cachep, SIZE_L3);
@@ -2099,7 +2117,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
for_each_online_node(node) {
cachep->nodelists[node] =
kmalloc_node(sizeof(struct kmem_list3),
- GFP_KERNEL, node);
+ gfp, node);
BUG_ON(!cachep->nodelists[node]);
kmem_list3_init(cachep->nodelists[node]);
}
@@ -2153,6 +2171,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
{
size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL, *pc;
+ gfp_t gfp;
/*
* Sanity checks... these are all serious usage bugs.
@@ -2168,8 +2187,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* We use cache_chain_mutex to ensure a consistent view of
* cpu_online_mask as well. Please see cpuup_callback
*/
- get_online_cpus();
- mutex_lock(&cache_chain_mutex);
+ if (slab_is_available()) {
+ get_online_cpus();
+ mutex_lock(&cache_chain_mutex);
+ }
list_for_each_entry(pc, &cache_chain, next) {
char tmp;
@@ -2278,8 +2299,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
align = ralign;
+ if (slab_is_available())
+ gfp = GFP_KERNEL;
+ else
+ gfp = GFP_NOWAIT;
+
/* Get cache's description obj. */
- cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
+ cachep = kmem_cache_zalloc(&cache_cache, gfp);
if (!cachep)
goto oops;
@@ -2382,7 +2408,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->ctor = ctor;
cachep->name = name;
- if (setup_cpu_cache(cachep)) {
+ if (setup_cpu_cache(cachep, gfp)) {
__kmem_cache_destroy(cachep);
cachep = NULL;
goto oops;
@@ -2394,8 +2420,10 @@ oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
- mutex_unlock(&cache_chain_mutex);
- put_online_cpus();
+ if (slab_is_available()) {
+ mutex_unlock(&cache_chain_mutex);
+ put_online_cpus();
+ }
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
@@ -2621,6 +2649,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
/* Slab management obj is off-slab. */
slabp = kmem_cache_alloc_node(cachep->slabp_cache,
local_flags, nodeid);
+ /*
+ * If the first object in the slab is leaked (it's allocated
+ * but no one has a reference to it), we want to make sure
+ * kmemleak does not treat the ->s_mem pointer as a reference
+ * to the object. Otherwise we will not report the leak.
+ */
+ kmemleak_scan_area(slabp, offsetof(struct slab, list),
+ sizeof(struct list_head), local_flags);
if (!slabp)
return NULL;
} else {
@@ -3141,6 +3177,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
}
+ /*
+ * To avoid a false negative, if an object that is in one of the
+ * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+ * treat the array pointers as a reference to the object.
+ */
+ kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
@@ -3327,6 +3369,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
+ flags &= slab_gfp_mask;
+
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))
@@ -3360,6 +3404,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+ flags);
if (unlikely((flags & __GFP_ZERO) && ptr))
memset(ptr, 0, obj_size(cachep));
@@ -3405,6 +3451,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;
+ flags &= slab_gfp_mask;
+
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))
@@ -3415,6 +3463,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+ flags);
prefetchw(objp);
if (unlikely((flags & __GFP_ZERO) && objp))
@@ -3530,6 +3580,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+ kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
/*
@@ -3802,7 +3853,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
-static int alloc_kmemlist(struct kmem_cache *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{
int node;
struct kmem_list3 *l3;
@@ -3812,7 +3863,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
for_each_online_node(node) {
if (use_alien_caches) {
- new_alien = alloc_alien_cache(node, cachep->limit);
+ new_alien = alloc_alien_cache(node, cachep->limit, gfp);
if (!new_alien)
goto fail;
}
@@ -3821,7 +3872,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
if (cachep->shared) {
new_shared = alloc_arraycache(node,
cachep->shared*cachep->batchcount,
- 0xbaadf00d);
+ 0xbaadf00d, gfp);
if (!new_shared) {
free_alien_cache(new_alien);
goto fail;
@@ -3850,7 +3901,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
free_alien_cache(new_alien);
continue;
}
- l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
+ l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
if (!l3) {
free_alien_cache(new_alien);
kfree(new_shared);
@@ -3906,18 +3957,18 @@ static void do_ccupdate_local(void *info)
/* Always called with the cache_chain_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
- int batchcount, int shared)
+ int batchcount, int shared, gfp_t gfp)
{
struct ccupdate_struct *new;
int i;
- new = kzalloc(sizeof(*new), GFP_KERNEL);
+ new = kzalloc(sizeof(*new), gfp);
if (!new)
return -ENOMEM;
for_each_online_cpu(i) {
new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
- batchcount);
+ batchcount, gfp);
if (!new->new[i]) {
for (i--; i >= 0; i--)
kfree(new->new[i]);
@@ -3944,11 +3995,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
kfree(ccold);
}
kfree(new);
- return alloc_kmemlist(cachep);
+ return alloc_kmemlist(cachep, gfp);
}
/* Called with cache_chain_mutex held always */
-static int enable_cpucache(struct kmem_cache *cachep)
+static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
int limit, shared;
@@ -3994,7 +4045,7 @@ static int enable_cpucache(struct kmem_cache *cachep)
if (limit > 32)
limit = 32;
#endif
- err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
+ err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
@@ -4300,7 +4351,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,
- batchcount, shared);
+ batchcount, shared,
+ GFP_KERNEL);
}
break;
}
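
Taken together, slab_gfp_mask, the "flags &= slab_gfp_mask" clamps in the allocation paths, and kmem_cache_init_late() implement one idea: until interrupts are enabled, every allocation request is silently downgraded to a non-sleeping one. Below is a minimal userspace sketch of that pattern, with made-up names (MAY_SLEEP, gfp_allowed, init_late) standing in for __GFP_WAIT, slab_gfp_mask and kmem_cache_init_late; it is an illustration of the flag-clamping technique, not kernel code.

#include <stdio.h>

#define MAY_SLEEP  0x1u                      /* stands in for __GFP_WAIT         */
#define MAY_IO     0x2u                      /* stands in for __GFP_IO           */
#define FULL_MASK  (MAY_SLEEP | MAY_IO)      /* stands in for __GFP_BITS_MASK    */
#define BOOT_MASK  (FULL_MASK & ~MAY_SLEEP)  /* stands in for SLAB_GFP_BOOT_MASK */

static unsigned int gfp_allowed = BOOT_MASK; /* plays the role of slab_gfp_mask  */

static void alloc_obj(unsigned int flags)
{
	flags &= gfp_allowed;                /* the clamp: flags &= slab_gfp_mask */
	printf("allocation %s sleep\n", (flags & MAY_SLEEP) ? "may" : "must not");
}

static void init_late(void)
{
	gfp_allowed = FULL_MASK;             /* like kmem_cache_init_late(): irqs are on */
}

int main(void)
{
	alloc_obj(MAY_SLEEP | MAY_IO);       /* early boot: sleeping bit stripped */
	init_late();
	alloc_obj(MAY_SLEEP | MAY_IO);       /* after init: request honoured      */
	return 0;
}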