-rw-r--r--  Documentation/vm/Makefile                                         |  2
-rw-r--r--  include/linux/slab_def.h                                          | 33
-rw-r--r--  include/linux/slub_def.h                                          | 55
-rw-r--r--  mm/slab.c                                                         | 38
-rw-r--r--  mm/slub.c                                                         | 30
-rw-r--r--  tools/slub/slabinfo.c (renamed from Documentation/vm/slabinfo.c)  |  6
6 files changed, 89 insertions, 75 deletions
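
The patch below follows one pattern throughout: kmalloc() and its relatives are __always_inline, so before this change every call site carried its own inlined trace_kmalloc() boilerplate; afterwards the tracepoint lives in a single out-of-line *_trace() helper and each inlined call site shrinks to one function call. Here is a minimal userspace sketch of that pattern, assuming nothing beyond GCC; it is a model, not kernel code, and the names trace_alloc, alloc_traced and pool_alloc are made up for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the real trace_kmalloc() tracepoint. */
static void trace_alloc(void *caller, void *ret, size_t size)
{
	fprintf(stderr, "alloc: caller=%p ret=%p size=%zu\n",
		caller, ret, size);
}

/* Out-of-line helper: the allocation *and* the tracing live here, once. */
void *alloc_traced(size_t size)
{
	void *ret = malloc(size);

	/* __builtin_return_address(0) is what the kernel's _RET_IP_ expands to. */
	trace_alloc(__builtin_return_address(0), ret, size);
	return ret;
}

/*
 * Inlined wrapper, analogous to kmalloc() after the patch: its body is a
 * single call, so each of its many call sites stays small.
 */
static inline void *pool_alloc(size_t size)
{
	return alloc_traced(size);
}

int main(void)
{
	free(pool_alloc(64));
	return 0;
}
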
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 9dcff328b964..3fa4d0668864 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502f6906..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_notrace(cachep, flags);
-
-	trace_kmalloc(_THIS_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+	ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 	return ret;
 }
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@ found:
 #endif
 	cachep = malloc_sizes[i].cs_cachep;
 
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node(_THIS_IP_, ret,
-			   size, slab_buffer_size(cachep),
-			   flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed180b9b..8b6e8ae5d5ca 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_notrace(s, flags);
-
-		trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-		return ret;
+		return kmem_cache_alloc_trace(s, flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
-			      int node)
+			      int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/mm/slab.c b/mm/slab.c
index e9f92987954a..264037449f08 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -3675,31 +3682,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
diff --git a/mm/slub.c b/mm/slub.c
index a2fe1727ed85..008cd743a36a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 #endif
diff --git a/Documentation/vm/slabinfo.c b/tools/slub/slabinfo.c
index 92e729f4b676..516551c9f172 100644
--- a/Documentation/vm/slabinfo.c
+++ b/tools/slub/slabinfo.c
@@ -607,7 +607,7 @@ static int debug_opt_scan(char *opt)
 	}
 
 	for ( ; *opt; opt++)
-	switch (*opt) {
+		switch (*opt) {
 		case 'F' : case 'f':
 			if (sanity)
 				return 0;
@@ -1127,7 +1127,7 @@ static void read_slab_dir(void)
 			continue;
 		switch (de->d_type) {
 		   case DT_LNK:
-		   	alias->name = strdup(de->d_name);
+			alias->name = strdup(de->d_name);
 			count = readlink(de->d_name, buffer, sizeof(buffer));
 
 			if (count < 0)
@@ -1143,7 +1143,7 @@ static void read_slab_dir(void)
 		case DT_DIR:
 			if (chdir(de->d_name))
 				fatal("Unable to access slab %s\n", slab->name);
-		   	slab->name = strdup(de->d_name);
+			slab->name = strdup(de->d_name);
 			slab->alias = 0;
 			slab->refs = 0;
 			slab->aliases = get_obj("aliases");
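
One detail worth calling out: the trace calls switch from _THIS_IP_ to _RET_IP_. While the tracepoint sat inside the inlined kmalloc() body, the current instruction pointer already identified the call site; once it moves into the out-of-line *_trace() helper, only the return address still points back at the caller. A small GCC-only userspace sketch of the difference; the two macros below are written out the way include/linux/kernel.h defined them at the time, to the best of my reading:

#include <stdio.h>

/* Address of the instruction at this point (GNU labels-as-values). */
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
/* Address the current function will return to, i.e. the caller. */
#define _RET_IP_   ((unsigned long)__builtin_return_address(0))

static void __attribute__((noinline)) traced_helper(void)
{
	/* Points into traced_helper() itself: useless for blaming a caller. */
	printf("_THIS_IP_ in helper: %#lx\n", _THIS_IP_);
	/* Points back into main(): this is what the moved tracepoint wants. */
	printf("_RET_IP_  in helper: %#lx\n", _RET_IP_);
}

int main(void)
{
	printf("call site around:    %#lx\n", _THIS_IP_);
	traced_helper();
	return 0;
}
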