author     Pekka Enberg <penberg@cs.helsinki.fi>       2006-02-01 03:05:52 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>       2006-02-01 08:53:18 -0800
commit     7fd6b1413082c303613fc137aca9a004740cacf0 (patch)
tree       3186f4fdecbda4f16558c393a05408f7d3799ad4
parent     b958f7d9f35bfb61625f201cd92a3fc39504af7a (diff)
[PATCH] slab: fix kzalloc and kstrdup caller report for CONFIG_DEBUG_SLAB
Fix kzalloc() and kstrdup() caller report for CONFIG_DEBUG_SLAB. We must
pass the caller to __cache_alloc() instead of directly doing
__builtin_return_address(0) there; otherwise kzalloc() and kstrdup() are
reported as the allocation site instead of the real one.

Thanks to Valdis Kletnieks for reporting the problem and Steven Rostedt
for the original idea.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
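To see why the caller was misreported, here is a minimal userspace sketch of the two schemes. The names cache_alloc_old/new() and kzalloc_old/new() are hypothetical stand-ins for __cache_alloc() and kzalloc(), not kernel code; compile with gcc -O0 so the call chain is not optimized away.

	#include <stdio.h>
	#include <stdlib.h>

	/* Old scheme: the shared helper records its own return address,
	 * so every allocation funneled through a wrapper is attributed
	 * to the wrapper. */
	static __attribute__((noinline)) void *cache_alloc_old(size_t size)
	{
		/* Always the wrapper's address, never the real site. */
		printf("caller: %p\n", __builtin_return_address(0));
		return malloc(size);
	}

	static __attribute__((noinline)) void *kzalloc_old(size_t size)
	{
		return cache_alloc_old(size); /* reported as the site */
	}

	/* New scheme, as in this patch: the outermost entry point
	 * captures its own return address and threads it down as an
	 * explicit parameter. */
	static void *cache_alloc_new(size_t size, void *caller)
	{
		printf("caller: %p\n", caller);
		return malloc(size);
	}

	static __attribute__((noinline)) void *kzalloc_new(size_t size)
	{
		/* Our own return address is the real allocation site. */
		return cache_alloc_new(size, __builtin_return_address(0));
	}

	int main(void)
	{
		free(kzalloc_old(16)); /* prints address in kzalloc_old() */
		free(kzalloc_new(16)); /* prints the call site in main() */
		return 0;
	}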
-rw-r--r--  include/linux/slab.h  |  7
-rw-r--r--  mm/slab.c             | 29
2 files changed, 31 insertions(+), 5 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1fb77a9cc148..8cf52939d0ab 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -76,7 +76,14 @@ struct cache_sizes {
kmem_cache_t *cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
+
+#ifndef CONFIG_DEBUG_SLAB
extern void *__kmalloc(size_t, gfp_t);
+#else
+extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+#define __kmalloc(size, flags) \
+ __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+#endif
static inline void *kmalloc(size_t size, gfp_t flags)
{
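With this hunk, a debug build turns __kmalloc() into a macro that expands at each call site. For example, a call such as

	ptr = __kmalloc(32, GFP_KERNEL);

inside kzalloc() now expands (a sketch of the preprocessor output, reflowed here for readability) to

	ptr = __kmalloc_track_caller(32, GFP_KERNEL,
				     __builtin_return_address(0));

so __builtin_return_address(0) is evaluated in kzalloc()'s own frame and names kzalloc()'s caller, the real allocation site, which then flows down through __do_kmalloc() and __cache_alloc() in the mm/slab.c changes below.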
diff --git a/mm/slab.c b/mm/slab.c
index 6fbd6a1cdeb4..67527268b01c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2687,7 +2687,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
return objp;
}
-static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
@@ -2698,7 +2699,7 @@ static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- __builtin_return_address(0));
+ caller);
prefetchw(objp);
return objp;
}
@@ -2927,7 +2928,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);
@@ -3041,7 +3042,8 @@ EXPORT_SYMBOL(kmalloc_node);
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+ void *caller)
{
struct kmem_cache *cachep;
@@ -3053,10 +3055,27 @@ void *__kmalloc(size_t size, gfp_t flags)
cachep = __find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
return NULL;
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc(size, flags, NULL);
}
EXPORT_SYMBOL(__kmalloc);
+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+ return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
#ifdef CONFIG_SMP
/**
* __alloc_percpu - allocate one copy of the object for every present