author     Vegard Nossum <vegard.nossum@gmail.com>    2008-08-31 23:39:21 +0200
committer  Thomas Gleixner <tglx@linutronix.de>       2008-09-01 09:47:16 +0200
commit     673d62cc5ea6fca046650f17f77985b112c62322 (patch)
tree       7cde0d756ffa71ad732cc079d28254b256c14e68 /lib
parent     bef69ea0dcce574a425feb0a5aa4c63dd108b9a6 (diff)
debugobjects: fix lockdep warning
Daniel J. Blueman reported:
> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
> (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
> (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120
We fix it by moving the actual freeing outside the lock; the hash bucket
lock now only protects the list.
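
The resulting shape is sketched below, condensed from the freelist pattern
the patch introduces in debug_objects_oom() and __debug_check_no_obj_freed()
(see the diff further down). drain_bucket() is a hypothetical name and the
unconditional move is a simplification for illustration: objects are unhooked
onto a local freelist while the bucket lock is held, and free_object() runs
only after the lock has been dropped.

static void drain_bucket(struct debug_bucket *db)
{
        struct hlist_node *node, *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;

        /* The bucket lock only protects the hash list manipulation ... */
        spin_lock_irqsave(&db->lock, flags);
        hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &freelist);
        }
        spin_unlock_irqrestore(&db->lock, flags);

        /* ... while the actual freeing (which takes pool_lock) runs unlocked. */
        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                hlist_del(&obj->node);
                free_object(obj);
        }
}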
The pool lock is also promoted to irq-safe (suggested by Dan). This is
necessary because free_object() is now called outside the irq-disabled
region, so we need to protect against an interrupt handler which calls
debug_object_init().
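
For illustration, this is the patched free_object() from the diff below with
comments added; nothing here is new code. If pool_lock stayed a plain
spin_lock, an interrupt arriving while it is held and entering
debug_object_init() -> alloc_object(), which also takes pool_lock, would spin
forever on the same CPU.

static void free_object(struct debug_obj *obj)
{
        unsigned long idx = (unsigned long)(obj - obj_static_pool);
        unsigned long flags;

        if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
                /* Give the object back to the static pool, irqs disabled. */
                spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&obj->node, &obj_pool);
                obj_pool_free++;
                obj_pool_used--;
                spin_unlock_irqrestore(&pool_lock, flags);
        } else {
                /* Dynamically allocated object: hand it back to the slab. */
                spin_lock_irqsave(&pool_lock, flags);
                obj_pool_used--;
                spin_unlock_irqrestore(&pool_lock, flags);
                kmem_cache_free(obj_cache, obj);
        }
}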
[tglx@linutronix.de: added hlist_move_list helper to avoid looping
through the list twice]
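
The helper lives in include/linux/list.h and splices the whole bucket list
onto the local freelist in O(1), so the objects are walked only once, outside
the lock. Roughly (a sketch of its behaviour, not a verbatim copy):

static inline void hlist_move_list(struct hlist_head *old,
                                   struct hlist_head *new)
{
        /* Take over the first entry and repair its back pointer ... */
        new->first = old->first;
        if (new->first)
                new->first->pprev = &new->first;
        /* ... and leave the old list empty. */
        old->first = NULL;
}

debug_objects_oom() then only needs hlist_move_list(&db->list, &freelist)
under db->lock, followed by a single walk of freelist after the lock is
dropped, as the diff below shows.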
Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib')
-rw-r--r--   lib/debugobjects.c | 31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde762d1..e3ab374e1334 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 static void free_object(struct debug_obj *obj)
 {
         unsigned long idx = (unsigned long)(obj - obj_static_pool);
+        unsigned long flags;
 
         if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&obj->node, &obj_pool);
                 obj_pool_free++;
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
         } else {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
                 kmem_cache_free(obj_cache, obj);
         }
 }
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
 {
         struct debug_bucket *db = obj_hash;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj *obj;
         unsigned long flags;
         int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
 
         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                 spin_lock_irqsave(&db->lock, flags);
-                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                hlist_move_list(&db->list, &freelist);
+                spin_unlock_irqrestore(&db->lock, flags);
+
+                /* Now free them */
+                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                         hlist_del(&obj->node);
                         free_object(obj);
                 }
-                spin_unlock_irqrestore(&db->lock, flags);
         }
 }
 
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
                 return;
         default:
                 hlist_del(&obj->node);
+                spin_unlock_irqrestore(&db->lock, flags);
                 free_object(obj);
-                break;
+                return;
         }
 out_unlock:
         spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj_descr *descr;
         enum debug_obj_state state;
         struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
                         goto repeat;
                 default:
                         hlist_del(&obj->node);
-                        free_object(obj);
+                        hlist_add_head(&obj->node, &freelist);
                         break;
                 }
         }
         spin_unlock_irqrestore(&db->lock, flags);
+
+        /* Now free them */
+        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                hlist_del(&obj->node);
+                free_object(obj);
+        }
+
         if (cnt > debug_objects_maxchain)
                 debug_objects_maxchain = cnt;
 }