author     Peter Zijlstra <a.p.zijlstra@chello.nl>               2007-07-19 01:48:53 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 10:04:49 -0700
commit     ca58abcb4a6d52ee2db1b1130cea3ca2a76677b9
tree       ad35a81d318a39cb33f28fe09c4374ec90b118ed
parent     21f8ca3bf6198bd21e3c4cc820af2ccf753a6ec8
lockdep: sanitise CONFIG_PROVE_LOCKING
Ensure that all of the lock dependency tracking code is under
CONFIG_PROVE_LOCKING. This allows us to use the held lock tracking code for
other purposes.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
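The pattern applied throughout is plain conditional compilation: the held-lock bookkeeping stays built whenever lockdep itself is enabled, while the dependency-graph machinery is fenced off behind CONFIG_PROVE_LOCKING. A minimal sketch of that split follows; the function names are hypothetical and not taken from this patch:

/* Hypothetical, simplified illustration of the split this patch enforces. */
struct held_lock;	/* opaque here; stands in for lockdep's per-acquisition record */

/* Held-lock tracking: compiled into every lockdep (CONFIG_LOCKDEP) build. */
static void note_lock_acquired(struct held_lock *hlock)
{
	/* push hlock onto the current task's held-lock stack ... */
}

#ifdef CONFIG_PROVE_LOCKING
/* Dependency proving: compiled only when CONFIG_PROVE_LOCKING is selected. */
static int validate_lock_order(struct held_lock *prev, struct held_lock *next)
{
	/* walk the dependency graph; return 0 if adding prev -> next forms a cycle */
	return 1;
}
#endif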
-rw-r--r--	kernel/lockdep.c	13
-rw-r--r--	kernel/spinlock.c	 4
2 files changed, 14 insertions, 3 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index edba2ffb43de..05c1261791f4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -95,6 +95,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entry(void)
 	}
 	return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	}
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
 	return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
 		init_utsname()->version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	}
 	return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
@@ -1087,7 +1096,7 @@ out_bug:
 
 	return 0;
 }
-
+#endif
 
 /*
  * Is this the address of a static object:
@@ -1307,6 +1316,7 @@ out_unlock_set:
 	return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ cache_hit:
 
 	return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c6c2bf85514..cd93bfe3f10d 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
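A note on the kernel/spinlock.c hunks: they widen the guard around the IRQ-save fast path from CONFIG_PROVE_LOCKING to CONFIG_LOCKDEP (the second site had even used CONFIG_PROVE_SPIN_LOCKING, a symbol that does not exist in Kconfig). As the in-line comment explains, the assumption that interrupts stay disabled during lock acquisition is made by lockdep's tracking core as a whole, not only by the dependency-proving code, so any lockdep build must take the _raw_spin_lock() path.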