From fcfdfe30e324725007e9dc5814b62a4c430ea909 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 Apr 2018 11:34:15 +0100
Subject: locking/barriers: Introduce smp_cond_load_relaxed() and
 atomic_cond_read_relaxed()

Whilst we currently provide smp_cond_load_acquire() and
atomic_cond_read_acquire(), there are cases where the ACQUIRE semantics
are not required because of a subsequent fence or release operation once
the conditional loop has exited.

This patch adds relaxed versions of the conditional spinning primitives
to avoid unnecessary barrier overhead on architectures such as arm64.

Signed-off-by: Will Deacon
Acked-by: Peter Zijlstra (Intel)
Acked-by: Waiman Long
Cc: Linus Torvalds
Cc: Thomas Gleixner
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar
---
 include/linux/atomic.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 8b276fd9a127..01ce3997cb42 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -654,6 +654,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
+#define atomic_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #ifdef CONFIG_GENERIC_ATOMIC64
@@ -1075,6 +1076,7 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
 }
 #endif
 
+#define atomic64_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #include 
-- 
cgit v1.2.3
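
The rationale of the patch above is that the per-iteration ACQUIRE of
smp_cond_load_acquire() can be dropped when the needed ordering comes from a
fence or release issued after the spin loop exits. Below is a minimal
userspace analogy of that pattern, using C11 atomics and pthreads rather than
the kernel primitives; the "ready"/"payload"/"producer"/"consumer" names are
invented for illustration, and this is only a sketch, not the kernel
implementation.

/* Userspace analogy of "spin relaxed, order once after the loop"; this is
 * NOT smp_cond_load_relaxed() itself, only an illustration of why the
 * per-iteration ACQUIRE can be dropped when a later fence orders things. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready;	/* condition being spun on */
static int payload;		/* data published before 'ready' is set */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;
	/* Release store: pairs with the consumer's acquire fence below. */
	atomic_store_explicit(&ready, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* Spin with relaxed loads: no barrier on every iteration... */
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;
	/* ...a single acquire fence after the loop provides the ordering
	 * that an acquire load at loop exit would otherwise have given. */
	atomic_thread_fence(memory_order_acquire);
	printf("payload = %d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Built with -pthread, the consumer always prints 42: the release store plus the
post-loop acquire fence give the same publish/consume ordering as an acquire
load on every iteration, at lower cost inside the spin loop.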
From 02acc80d19edb0d5684c997b2004ad19f9f5236e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Mon, 23 Apr 2018 18:10:23 +0200
Subject: delayacct: Use raw_spinlocks

try_to_wake_up() might invoke delayacct_blkio_end() while holding the
pi_lock (which is a raw_spinlock_t). delayacct_blkio_end() acquires
task_delay_info.lock, which is a spinlock_t. This causes a might-sleep
splat on -RT, where non-raw spinlocks are converted to 'sleeping'
spinlocks.

task_delay_info.lock is only held for a short amount of time, so it is
not a problem latency-wise to convert it to a raw spinlock.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
Cc: Balbir Singh
Link: https://lkml.kernel.org/r/20180423161024.6710-1-bigeasy@linutronix.de
---
 include/linux/delayacct.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 5e335b6203f4..e6c0448ebcc7 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,7 +29,7 @@
 #ifdef CONFIG_TASK_DELAY_ACCT
 
 struct task_delay_info {
-	spinlock_t	lock;
+	raw_spinlock_t	lock;
 	unsigned int	flags;	/* Private per-task flags */
 
 	/* For each stat XXX, add following, aligned appropriately
-- 
cgit v1.2.3

From b7e4aadef28f217de8907eec60a964328797a2be Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Mon, 14 May 2018 16:01:27 -0700
Subject: locking/spinlocks: Document the semantics of spin_is_locked()

There has been recurrent uncertainty concerning the semantics of
spin_is_locked(), likely because these semantics have remained
undocumented and have historically been linked to the (likewise
unclear) semantics of spin_unlock_wait().

A recent audit [1] of the callers of the primitive confirmed that none
of them rely on particular ordering guarantees; document these
semantics by adding a docbook header to spin_is_locked(). Also,
describe behaviors specific to certain CONFIG_SMP=n builds.

[1] https://marc.info/?l=linux-kernel&m=151981440005264&w=2
    https://marc.info/?l=linux-kernel&m=152042843808540&w=2
    https://marc.info/?l=linux-kernel&m=152043346110262&w=2

Co-Developed-by: Andrea Parri
Co-Developed-by: Alan Stern
Co-Developed-by: David Howells
Signed-off-by: Andrea Parri
Signed-off-by: Alan Stern
Signed-off-by: David Howells
Signed-off-by: Paul E. McKenney
Acked-by: Randy Dunlap
Cc: Akira Yokosawa
Cc: Andrew Morton
Cc: Boqun Feng
Cc: Jade Alglave
Cc: Linus Torvalds
Cc: Luc Maranget
Cc: Nicholas Piggin
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Will Deacon
Cc: linux-arch@vger.kernel.org
Cc: parri.andrea@gmail.com
Link: http://lkml.kernel.org/r/1526338889-7003-1-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 include/linux/spinlock.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4894d322d258..1e8a46435838 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -380,6 +380,24 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+/**
+ * spin_is_locked() - Check whether a spinlock is locked.
+ * @lock: Pointer to the spinlock.
+ *
+ * This function is NOT required to provide any memory ordering
+ * guarantees; it could be used for debugging purposes or, when
+ * additional synchronization is needed, accompanied with other
+ * constructs (memory barriers) enforcing the synchronization.
+ *
+ * Returns: 1 if @lock is locked, 0 otherwise.
+ *
+ * Note that the function only tells you that the spinlock is
+ * seen to be locked, not that it is locked on your CPU.
+ *
+ * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
+ * the return value is always 0 (see include/linux/spinlock_up.h).
+ * Therefore you should not rely heavily on the return value.
+ */
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
-- 
cgit v1.2.3
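
The new kernel-doc comment above boils down to: a spin_is_locked() result is
an unordered snapshot of the lock word, good enough for debugging assertions
but never a substitute for actual locking. The following self-contained sketch
illustrates that distinction with a toy C11 test-and-set lock rather than the
kernel's spinlock_t; the toy_* names are invented for illustration and do not
correspond to any kernel API.

/* Toy C11 spinlock with an is_locked() query; NOT the kernel API, only an
 * illustration of why an unordered "is it locked?" snapshot is fine for
 * debug assertions but must not be used as a synchronization primitive. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_spinlock {
	atomic_bool locked;
};

static void toy_spin_lock(struct toy_spinlock *l)
{
	/* Acquire on success orders the critical section after the lock. */
	while (atomic_exchange_explicit(&l->locked, true, memory_order_acquire))
		;
}

static void toy_spin_unlock(struct toy_spinlock *l)
{
	atomic_store_explicit(&l->locked, false, memory_order_release);
}

static bool toy_spin_is_locked(struct toy_spinlock *l)
{
	/* Relaxed load: no ordering guarantees, mirroring the documented
	 * spin_is_locked() semantics; the answer may already be stale. */
	return atomic_load_explicit(&l->locked, memory_order_relaxed);
}

int main(void)
{
	struct toy_spinlock l;

	atomic_init(&l.locked, false);

	toy_spin_lock(&l);
	/* Fine: a debugging assertion about state this thread created. */
	assert(toy_spin_is_locked(&l));
	toy_spin_unlock(&l);

	/* NOT fine as exclusion: another thread could take the lock right
	 * after this check returns, so treat the result only as a hint. */
	if (!toy_spin_is_locked(&l))
		printf("lock observed free, but that is only a hint\n");

	return 0;
}

The assertion is sound because the caller itself holds the lock it asserts;
the final check is merely diagnostic, which is exactly the usage the docbook
header sanctions.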
From 1362ae43c503a4e333ab6948fc4c6e0e794e1558 Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Mon, 14 May 2018 16:01:29 -0700
Subject: locking/spinlocks: Clean up comment and #ifndef for
 {,queued_}spin_is_locked()

Removes "#ifndef queued_spin_is_locked" from the generic code: this is
unused and it's reasonable to conclude that it will continue to be
unused.

Also removes the comment about spin_is_locked() from mutex_is_locked():
the comment remains valid but not particularly useful.

Suggested-by: Will Deacon
Signed-off-by: Andrea Parri
Signed-off-by: Paul E. McKenney
Acked-by: Will Deacon
Cc: Andrew Morton
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: akiyks@gmail.com
Cc: boqun.feng@gmail.com
Cc: dhowells@redhat.com
Cc: j.alglave@ucl.ac.uk
Cc: linux-arch@vger.kernel.org
Cc: luc.maranget@inria.fr
Cc: npiggin@gmail.com
Cc: parri.andrea@gmail.com
Cc: stern@rowland.harvard.edu
Link: http://lkml.kernel.org/r/1526338889-7003-3-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 include/linux/mutex.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 14bc0d5d0ee5..3093dd162424 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -146,9 +146,6 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 static inline bool mutex_is_locked(struct mutex *lock)
 {
-	/*
-	 * XXX think about spin_is_locked
-	 */
 	return __mutex_owner(lock) != NULL;
 }
-- 
cgit v1.2.3