From 931ab4a5ce2d93cb434dc7fc067a702117453b13 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 25 Jul 2017 09:59:08 -0700
Subject: atomics: Revert addition of comment header to spin_unlock_wait()

There is still considerable confusion as to the semantics of
spin_unlock_wait(), but there seems to be universal agreement that
it is not that of a lock/unlock pair.  This commit therefore removes
the comment added by 6016ffc3874d ("atomics: Add header comment so
spin_unlock_wait()") in order to prevent at least that flavor of
confusion.

Signed-off-by: Paul E. McKenney
---
 include/linux/spinlock.h | 20 --------------------
 1 file changed, 20 deletions(-)

(limited to 'include/linux/spinlock.h')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..59248dcc6ef3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -369,26 +369,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock().  However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1.  All accesses preceding the spin_unlock_wait() happen before
- *     any accesses in later critical sections for this same lock.
- * 2.  All accesses following the spin_unlock_wait() happen after
- *     any accesses in earlier critical sections for this same lock.
- */
 static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
--
cgit v1.2.3
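For reference, the kerneldoc removed above claimed that spin_unlock_wait() behaved
like an empty critical section. A minimal sketch of that claimed (and now disavowed)
equivalence follows; "lock" here is a hypothetical spinlock_t for illustration, not
code from the patch:

	/*
	 * What the removed comment said spin_unlock_wait(&lock) was
	 * semantically equivalent to: acquire the lock, thereby waiting
	 * for any current holder, then release it immediately.
	 */
	spin_lock(&lock);
	spin_unlock(&lock);

As the changelog notes, this equivalence is precisely what there is universal
agreement against: per the removed comment itself, most architectures implemented
spin_unlock_wait() so that it did not block concurrent lock acquisition and in some
cases never wrote to the lock variable, making it weaker than a real lock/unlock pair.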
From d3a024abbc438277851c510b51ec9b158821488b Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 29 Jun 2017 15:47:44 -0700
Subject: locking: Remove spin_unlock_wait() generic definitions

There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair.  This commit therefore removes spin_unlock_wait() and related
definitions from core code.

Signed-off-by: Paul E. McKenney
Cc: Arnd Bergmann
Cc: Ingo Molnar
Cc: Will Deacon
Cc: Peter Zijlstra
Cc: Alan Stern
Cc: Andrea Parri
Cc: Linus Torvalds
---
 include/linux/spinlock.h | 11 -----------
 1 file changed, 11 deletions(-)

(limited to 'include/linux/spinlock.h')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 59248dcc6ef3..ef018a6e4985 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,12 +130,6 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
-
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
@@ -369,11 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
-	raw_spin_unlock_wait(&lock->rlock);
-}
-
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
--
cgit v1.2.3
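Any caller left behind by this removal has to be converted by hand. A minimal
before/after sketch of the substitution the changelog has in mind; "mylock" is a
hypothetical spinlock_t standing in for a real caller's lock:

	/* Before: wait for any current critical section to complete. */
	spin_unlock_wait(&mylock);

	/*
	 * After: a genuine lock/unlock pair, which the changelog argues
	 * serves all known callers just as well.
	 */
	spin_lock(&mylock);
	spin_unlock(&mylock);

The pair can be costlier (it writes the lock word and contends with other
acquirers), but in exchange it provides the well-defined acquire/release ordering
that spin_unlock_wait() never settled on.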