diff options
| author    | Peter Zijlstra <a.p.zijlstra@chello.nl>  | 2011-06-21 17:17:27 +0200 |
|-----------|------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@elte.hu>              | 2011-06-22 11:39:34 +0200 |
| commit    | dd4e5d3ac4a76b868daf30e35bd572def96c30ed | (patch)                   |
| tree      | 76ee78919a212b2c28171d39ccb53195aec7284f |                           |
| parent    | 4f2a8d3cf5e0486fd547633fa86c5d130ae98cad | (diff)                    |
lockdep: Fix trace_[soft,hard]irqs_[on,off]() recursion
Commit:
  1efc5da3cf56: [PATCH] order of lockdep off/on in vprintk() should be changed
explains the reason for having raw_local_irq_*() and lockdep_off()
in printk(). Instead of working around the broken recursion detection
of interrupt state tracking, fix it.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: efault@gmx.de
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110621153806.185242734@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | kernel/lockdep.c | 30 |
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..81968a065b4c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2478,15 +2478,10 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_on(CALLER_ADDR0, ip);
-
-	if (unlikely(!debug_locks || current->lockdep_recursion))
-		return;
-
 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
@@ -2502,8 +2497,6 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
 		return;
 	/*
@@ -2525,6 +2518,21 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+	time_hardirqs_on(CALLER_ADDR0, ip);
+
+	if (unlikely(!debug_locks || current->lockdep_recursion))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return;
+
+	current->lockdep_recursion = 1;
+	__trace_hardirqs_on_caller(ip);
+	current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
@@ -2574,7 +2582,7 @@ void trace_softirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2585,6 +2593,7 @@ void trace_softirqs_on(unsigned long ip)
 		return;
 	}
 
+	current->lockdep_recursion = 1;
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
@@ -2599,6 +2608,7 @@ void trace_softirqs_on(unsigned long ip)
 	 */
 	if (curr->hardirqs_enabled)
 		mark_held_locks(curr, SOFTIRQ);
+	current->lockdep_recursion = 0;
 }
 
 /*
@@ -2608,7 +2618,7 @@ void trace_softirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
