Diffstat (limited to 'include/linux/bit_spinlock.h')
| -rw-r--r-- | include/linux/bit_spinlock.h | 24 |
1 file changed, 20 insertions, 4 deletions
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index c0989b5b0407..7869a6e59b6a 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -7,6 +7,18 @@
 #include <linux/atomic.h>
 #include <linux/bug.h>
 
+#include <asm/processor.h>	/* for cpu_relax() */
+
+/*
+ * For static context analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake context that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+context_lock_struct(__context_bitlock) { };
+#define __bitlock(bitnum, addr)	(struct __context_bitlock *)(bitnum + (addr))
+
 /*
  * bit-based spin_lock()
  *
@@ -14,6 +26,7 @@
  * are significantly faster.
  */
 static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+	__acquires(__bitlock(bitnum, addr))
 {
 	/*
 	 * Assuming the lock is uncontended, this never enters
@@ -32,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
 		preempt_disable();
 	}
 #endif
-	__acquire(bitlock);
+	__acquire(__bitlock(bitnum, addr));
 }
 
 /*
  * Return true if it was acquired
  */
 static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+	__cond_acquires(true, __bitlock(bitnum, addr))
 {
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -47,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
 		return 0;
 	}
 #endif
-	__acquire(bitlock);
+	__acquire(__bitlock(bitnum, addr));
 	return 1;
 }
 
@@ -55,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
  *  bit-based spin_unlock()
  */
 static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+	__releases(__bitlock(bitnum, addr))
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -63,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
 	clear_bit_unlock(bitnum, addr);
 #endif
 	preempt_enable();
-	__release(bitlock);
+	__release(__bitlock(bitnum, addr));
 }
 
 /*
@@ -72,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
  *  protecting the rest of the flags in the word.
  */
 static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+	__releases(__bitlock(bitnum, addr))
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -80,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
 	__clear_bit_unlock(bitnum, addr);
 #endif
 	preempt_enable();
-	__release(bitlock);
+	__release(__bitlock(bitnum, addr));
 }
 
 /*
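For context, here is a minimal usage sketch of the annotated primitives. It is not part of the patch, and the struct, field, and function names (my_obj, state, counter, my_obj_inc, my_obj_try_inc, MY_LOCK_BIT) are hypothetical. The point of the __bitlock(bitnum, addr) token is that the analysis treats each (bit, addr) pair as a distinct lock instance, so every acquire must be balanced by a release of the same bit in the same word:

/* Illustration only: a hypothetical user of the annotated bit_spinlock API. */
#include <linux/bit_spinlock.h>

#define MY_LOCK_BIT	0	/* bit 0 of @state doubles as the spinlock */

struct my_obj {
	unsigned long state;	/* bit 0: lock bit, remaining bits: flags */
	int counter;		/* protected by the bit lock */
};

static void my_obj_inc(struct my_obj *obj)
{
	/* Acquires __bitlock(MY_LOCK_BIT, &obj->state) per the annotation. */
	bit_spin_lock(MY_LOCK_BIT, &obj->state);
	obj->counter++;
	/* Must release the same (bit, addr) token, or the analysis warns. */
	bit_spin_unlock(MY_LOCK_BIT, &obj->state);
}

static int my_obj_try_inc(struct my_obj *obj)
{
	/* __cond_acquires(true, ...): the lock is held only if 1 is returned. */
	if (!bit_spin_trylock(MY_LOCK_BIT, &obj->state))
		return 0;
	obj->counter++;
	bit_spin_unlock(MY_LOCK_BIT, &obj->state);
	return 1;
}

Because the token is derived from both the bit number and the address, locking bit 0 of one word and unlocking bit 0 of a different word (or a different bit of the same word) would not balance for the analysis, which is exactly the distinction the fake __context_bitlock cast exists to preserve.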
