From b41cda03765580caf7723b8c1b672d191c71013f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:27:38 +0200 Subject: locking/rtmutex: Set proper wait context for lockdep RT mutexes belong to the LD_WAIT_SLEEP class. Make them so. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.031014562@linutronix.de --- kernel/locking/rtmutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index ad0db322ed3b..1a7e3f838077 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1556,7 +1556,7 @@ void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) { debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lockdep_init_map(&lock->dep_map, name, key, 0); + lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP); __rt_mutex_basic_init(lock); } -- cgit v1.2.3 From 785159301bedea25fae9b20cae3d12377246e941 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Sun, 15 Aug 2021 23:27:54 +0200 Subject: locking/rtmutex: Convert macros to inlines Inlines are type-safe... Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.610830960@linutronix.de --- kernel/locking/rtmutex.c | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 1a7e3f838077..5187added8bc 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -141,8 +141,19 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) * set up. */ #ifndef CONFIG_DEBUG_RT_MUTEXES -# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) -# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) +static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, + struct task_struct *old, + struct task_struct *new) +{ + return cmpxchg_acquire(&lock->owner, old, new) == old; +} + +static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, + struct task_struct *old, + struct task_struct *new) +{ + return cmpxchg_release(&lock->owner, old, new) == old; +} /* * Callers must hold the ->wait_lock -- which is the whole purpose as we force @@ -201,8 +212,20 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, } #else -# define rt_mutex_cmpxchg_acquire(l,c,n) (0) -# define rt_mutex_cmpxchg_release(l,c,n) (0) +static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, + struct task_struct *old, + struct task_struct *new) +{ + return false; + +} + +static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, + struct task_struct *old, + struct task_struct *new) +{ + return false; +} static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { -- cgit v1.2.3 From 709e0b62869f625afd18edd79f190c38cb39dfb2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:27:55 +0200 Subject: locking/rtmutex: Switch to from cmpxchg_*() to try_cmpxchg_*() Allows the compiler to generate better code depending on the architecture. 
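To illustrate the calling convention this commit switches to, here is a minimal stand-alone sketch using GCC/C11 atomics instead of the kernel primitives; the struct and function names are invented for the example. The point is that try_cmpxchg() takes the expected value by reference and refreshes it on failure, so neither the caller nor the compiler has to reload the variable by hand before retrying.

/* Stand-alone illustration of the try_cmpxchg() calling convention using
 * GCC/C11 builtins; demo_lock and friends are made-up names, not kernel API. */
#include <stdbool.h>
#include <stdio.h>

struct demo_lock {
	void *owner;		/* NULL means unlocked */
};

/* cmpxchg() style: returns the observed old value, caller compares it. */
static bool demo_cmpxchg_acquire(struct demo_lock *l, void *old, void *new)
{
	void *seen = old;

	__atomic_compare_exchange_n(&l->owner, &seen, new, false,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	return seen == old;
}

/* try_cmpxchg() style: @old is passed by reference and refreshed on failure,
 * which lets the compiler keep the just-loaded value in a register. */
static bool demo_try_cmpxchg_acquire(struct demo_lock *l, void **old, void *new)
{
	return __atomic_compare_exchange_n(&l->owner, old, new, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	struct demo_lock lock = { .owner = NULL };
	void *expected = NULL;
	int me;

	printf("cmpxchg style:     %d\n", demo_cmpxchg_acquire(&lock, NULL, &me));
	lock.owner = NULL;
	printf("try_cmpxchg style: %d\n", demo_try_cmpxchg_acquire(&lock, &expected, &me));
	return 0;
}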
Suggested-by: Peter Zijlstra Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.668958502@linutronix.de --- kernel/locking/rtmutex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 5187added8bc..98f06c509f02 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -145,14 +145,14 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, struct task_struct *old, struct task_struct *new) { - return cmpxchg_acquire(&lock->owner, old, new) == old; + return try_cmpxchg_acquire(&lock->owner, &old, new); } static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, struct task_struct *old, struct task_struct *new) { - return cmpxchg_release(&lock->owner, old, new) == old; + return try_cmpxchg_release(&lock->owner, &old, new); } /* -- cgit v1.2.3 From 531ae4b06a737ed5539cd75dc6f6b9a28f900bba Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:27:57 +0200 Subject: locking/rtmutex: Split API from implementation Prepare for reusing the inner functions of rtmutex for RT lock substitutions: introduce kernel/locking/rtmutex_api.c and move them there. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.726560996@linutronix.de --- kernel/locking/rtmutex.c | 479 ++--------------------------------------------- 1 file changed, 17 insertions(+), 462 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 98f06c509f02..cd0e1a43b07a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -11,14 +11,12 @@ * * See Documentation/locking/rt-mutex-design.rst for details. */ -#include -#include +#include +#include +#include #include #include -#include #include -#include -#include #include "rtmutex_common.h" @@ -371,11 +369,6 @@ rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return chwalk == RT_MUTEX_FULL_CHAINWALK; } -/* - * Max number of times we'll walk the boosting chain: - */ -int max_lock_depth = 1024; - static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; @@ -1112,42 +1105,6 @@ static void __sched remove_waiter(struct rt_mutex *lock, raw_spin_lock_irq(&lock->wait_lock); } -/* - * Recheck the pi chain, in case we got a priority setting - * - * Called from sched_setscheduler - */ -void __sched rt_mutex_adjust_pi(struct task_struct *task) -{ - struct rt_mutex_waiter *waiter; - struct rt_mutex *next_lock; - unsigned long flags; - - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; - if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; - } - next_lock = waiter->lock; - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - - /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ - get_task_struct(task); - - rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, - next_lock, NULL, task); -} - -void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) -{ - debug_rt_mutex_init_waiter(waiter); - RB_CLEAR_NODE(&waiter->pi_tree_entry); - RB_CLEAR_NODE(&waiter->tree_entry); - waiter->task = NULL; -} - /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take @@ -1274,6 +1231,15 @@ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, return ret; } +static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, + unsigned int state) +{ + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; + + return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); +} + static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = try_to_take_rt_mutex(lock, current, NULL); @@ -1316,21 +1282,16 @@ static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) return ret; } -/* - * Performs the wakeup of the top-waiter and re-enables preemption. - */ -void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) +static __always_inline int __rt_mutex_trylock(struct rt_mutex *lock) { - wake_up_q(wake_q); + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; - /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ - preempt_enable(); + return rt_mutex_slowtrylock(lock); } /* * Slow path to release a rt-mutex. - * - * Return whether the current task needs to call rt_mutex_postunlock(). */ static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { @@ -1393,416 +1354,10 @@ static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) rt_mutex_postunlock(&wake_q); } -/* - * debug aware fast / slowpath lock,trylock,unlock - * - * The atomic acquire/release ops are compiled away, when either the - * architecture does not support cmpxchg or when debugging is enabled. - */ -static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state, - unsigned int subclass) -{ - int ret; - - might_sleep(); - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - - ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - return ret; -} - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -/** - * rt_mutex_lock_nested - lock a rt_mutex - * - * @lock: the rt_mutex to be locked - * @subclass: the lockdep subclass - */ -void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) -{ - __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass); -} -EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); - -#else /* !CONFIG_DEBUG_LOCK_ALLOC */ - -/** - * rt_mutex_lock - lock a rt_mutex - * - * @lock: the rt_mutex to be locked - */ -void __sched rt_mutex_lock(struct rt_mutex *lock) -{ - __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0); -} -EXPORT_SYMBOL_GPL(rt_mutex_lock); -#endif - -/** - * rt_mutex_lock_interruptible - lock a rt_mutex interruptible - * - * @lock: the rt_mutex to be locked - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - */ -int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) -{ - return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0); -} -EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); - -/** - * rt_mutex_trylock - try to lock a rt_mutex - * - * @lock: the rt_mutex to be locked - * - * This function can only be called in thread context. 
It's safe to call it - * from atomic regions, but not from hard or soft interrupt context. - * - * Returns: - * 1 on success - * 0 on contention - */ -int __sched rt_mutex_trylock(struct rt_mutex *lock) +static __always_inline void __rt_mutex_unlock(struct rt_mutex *lock) { - int ret; - - if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task())) - return 0; - - /* - * No lockdep annotation required because lockdep disables the fast - * path. - */ - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 1; - - ret = rt_mutex_slowtrylock(lock); - if (ret) - mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); - - return ret; -} -EXPORT_SYMBOL_GPL(rt_mutex_trylock); - -/** - * rt_mutex_unlock - unlock a rt_mutex - * - * @lock: the rt_mutex to be unlocked - */ -void __sched rt_mutex_unlock(struct rt_mutex *lock) -{ - mutex_release(&lock->dep_map, _RET_IP_); if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) return; rt_mutex_slowunlock(lock); } -EXPORT_SYMBOL_GPL(rt_mutex_unlock); - -/* - * Futex variants, must not use fastpath. - */ -int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return rt_mutex_slowtrylock(lock); -} - -int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return __rt_mutex_slowtrylock(lock); -} - -/** - * __rt_mutex_futex_unlock - Futex variant, that since futex variants - * do not use the fast-path, can be simple and will not need to retry. - * - * @lock: The rt_mutex to be unlocked - * @wake_q: The wake queue head from which to get the next lock waiter - */ -bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) -{ - lockdep_assert_held(&lock->wait_lock); - - debug_rt_mutex_unlock(lock); - - if (!rt_mutex_has_waiters(lock)) { - lock->owner = NULL; - return false; /* done */ - } - - /* - * We've already deboosted, mark_wakeup_next_waiter() will - * retain preempt_disabled when we drop the wait_lock, to - * avoid inversion prior to the wakeup. preempt_disable() - * therein pairs with rt_mutex_postunlock(). - */ - mark_wakeup_next_waiter(wake_q, lock); - - return true; /* call postunlock() */ -} - -void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) -{ - DEFINE_WAKE_Q(wake_q); - unsigned long flags; - bool postunlock; - - raw_spin_lock_irqsave(&lock->wait_lock, flags); - postunlock = __rt_mutex_futex_unlock(lock, &wake_q); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - if (postunlock) - rt_mutex_postunlock(&wake_q); -} - -/** - * __rt_mutex_init - initialize the rt_mutex - * - * @lock: The rt_mutex to be initialized - * @name: The lock name used for debugging - * @key: The lock class key used for debugging - * - * Initialize the rt_mutex to unlocked state. - * - * Initializing of a locked rt_mutex is not allowed - */ -void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name, - struct lock_class_key *key) -{ - debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP); - - __rt_mutex_basic_init(lock); -} -EXPORT_SYMBOL_GPL(__rt_mutex_init); - -/** - * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a - * proxy owner - * - * @lock: the rt_mutex to be locked - * @proxy_owner:the task to set as owner - * - * No locking. Caller has to do serializing itself - * - * Special API call for PI-futex support. This initializes the rtmutex and - * assigns it to @proxy_owner. 
Concurrent operations on the rtmutex are not - * possible at this point because the pi_state which contains the rtmutex - * is not yet visible to other tasks. - */ -void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) -{ - __rt_mutex_basic_init(lock); - rt_mutex_set_owner(lock, proxy_owner); -} - -/** - * rt_mutex_proxy_unlock - release a lock on behalf of owner - * - * @lock: the rt_mutex to be locked - * - * No locking. Caller has to do serializing itself - * - * Special API call for PI-futex support. This merrily cleans up the rtmutex - * (debugging) state. Concurrent operations on this rt_mutex are not - * possible because it belongs to the pi_state which is about to be freed - * and it is not longer visible to other tasks. - */ -void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock) -{ - debug_rt_mutex_proxy_unlock(lock); - rt_mutex_set_owner(lock, NULL); -} - -/** - * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task - * @lock: the rt_mutex to take - * @waiter: the pre-initialized rt_mutex_waiter - * @task: the task to prepare - * - * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock - * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. - * - * NOTE: does _NOT_ remove the @waiter on failure; must either call - * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. - * - * Returns: - * 0 - task blocked on lock - * 1 - acquired the lock for task, caller should wake it up - * <0 - error - * - * Special API call for PI-futex support. - */ -int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) -{ - int ret; - - lockdep_assert_held(&lock->wait_lock); - - if (try_to_take_rt_mutex(lock, task, NULL)) - return 1; - - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); - - if (ret && !rt_mutex_owner(lock)) { - /* - * Reset the return value. We might have - * returned with -EDEADLK and the owner - * released the lock while we were walking the - * pi chain. Let the waiter sort it out. - */ - ret = 0; - } - - return ret; -} - -/** - * rt_mutex_start_proxy_lock() - Start lock acquisition for another task - * @lock: the rt_mutex to take - * @waiter: the pre-initialized rt_mutex_waiter - * @task: the task to prepare - * - * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock - * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. - * - * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter - * on failure. - * - * Returns: - * 0 - task blocked on lock - * 1 - acquired the lock for task, caller should wake it up - * <0 - error - * - * Special API call for PI-futex support. - */ -int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) -{ - int ret; - - raw_spin_lock_irq(&lock->wait_lock); - ret = __rt_mutex_start_proxy_lock(lock, waiter, task); - if (unlikely(ret)) - remove_waiter(lock, waiter); - raw_spin_unlock_irq(&lock->wait_lock); - - return ret; -} - -/** - * rt_mutex_wait_proxy_lock() - Wait for lock acquisition - * @lock: the rt_mutex we were woken on - * @to: the timeout, null if none. hrtimer should already have - * been started. - * @waiter: the pre-initialized rt_mutex_waiter - * - * Wait for the lock acquisition started on our behalf by - * rt_mutex_start_proxy_lock(). 
Upon failure, the caller must call - * rt_mutex_cleanup_proxy_lock(). - * - * Returns: - * 0 - success - * <0 - error, one of -EINTR, -ETIMEDOUT - * - * Special API call for PI-futex support - */ -int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter) -{ - int ret; - - raw_spin_lock_irq(&lock->wait_lock); - /* sleep on the mutex */ - set_current_state(TASK_INTERRUPTIBLE); - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); - /* - * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might - * have to fix that up. - */ - fixup_rt_mutex_waiters(lock); - raw_spin_unlock_irq(&lock->wait_lock); - - return ret; -} - -/** - * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition - * @lock: the rt_mutex we were woken on - * @waiter: the pre-initialized rt_mutex_waiter - * - * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or - * rt_mutex_wait_proxy_lock(). - * - * Unless we acquired the lock; we're still enqueued on the wait-list and can - * in fact still be granted ownership until we're removed. Therefore we can - * find we are in fact the owner and must disregard the - * rt_mutex_wait_proxy_lock() failure. - * - * Returns: - * true - did the cleanup, we done. - * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned, - * caller should disregards its return value. - * - * Special API call for PI-futex support - */ -bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) -{ - bool cleanup = false; - - raw_spin_lock_irq(&lock->wait_lock); - /* - * Do an unconditional try-lock, this deals with the lock stealing - * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() - * sets a NULL owner. - * - * We're not interested in the return value, because the subsequent - * test on rt_mutex_owner() will infer that. If the trylock succeeded, - * we will own the lock and it will have removed the waiter. If we - * failed the trylock, we're still not owner and we need to remove - * ourselves. - */ - try_to_take_rt_mutex(lock, current, waiter); - /* - * Unless we're the owner; we're still enqueued on the wait_list. - * So check if we became owner, if not, take us off the wait_list. - */ - if (rt_mutex_owner(lock) != current) { - remove_waiter(lock, waiter); - cleanup = true; - } - /* - * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might - * have to fix that up. - */ - fixup_rt_mutex_waiters(lock); - - raw_spin_unlock_irq(&lock->wait_lock); - - return cleanup; -} - -#ifdef CONFIG_DEBUG_RT_MUTEXES -void rt_mutex_debug_task_free(struct task_struct *task) -{ - DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); - DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); -} -#endif -- cgit v1.2.3 From 830e6acc8a1cafe153a0d88f9b2455965b396131 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 15 Aug 2021 23:27:58 +0200 Subject: locking/rtmutex: Split out the inner parts of 'struct rtmutex' RT builds substitutions for rwsem, mutex, spinlock and rwlock around rtmutexes. Split the inner working out so each lock substitution can use them with the appropriate lockdep annotations. This avoids having an extra unused lockdep map in the wrapped rtmutex. No functional change. 
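As background for the struct split, the following stand-alone sketch shows the layout pattern with invented names (it is not the kernel's actual definitions): a small base struct carries only what the shared slowpath needs, each lock substitution embeds it next to its own annotation field, and container_of()-style arithmetic recovers the wrapper type when required, so no wrapper drags around a second, unused lockdep map.

/* Sketch of the "inner base struct" pattern; all names are made up. */
#include <stddef.h>
#include <stdio.h>

struct demo_lock_base {			/* what the shared slowpath operates on */
	void *owner;
};

struct demo_mutex {			/* mutex substitution */
	struct demo_lock_base rtmutex;
	const char *dep_map;		/* stands in for struct lockdep_map */
};

struct demo_spinlock {			/* spinlock substitution, own annotation */
	struct demo_lock_base rtmutex;
	const char *dep_map;
};

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The shared slowpath only ever sees the base ... */
static void demo_slowpath(struct demo_lock_base *lock)
{
	printf("slowpath on base %p\n", (void *)lock);
}

int main(void)
{
	struct demo_mutex m = { .dep_map = "demo_mutex" };

	demo_slowpath(&m.rtmutex);
	/* ... and a wrapper-aware caller can still get its own type back: */
	printf("wrapper: %s\n",
	       demo_container_of(&m.rtmutex, struct demo_mutex, rtmutex)->dep_map);
	return 0;
}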
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.784739994@linutronix.de --- kernel/locking/rtmutex.c | 64 +++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 31 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index cd0e1a43b07a..b31f6cbe3a30 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -48,7 +48,7 @@ */ static __always_inline void -rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) +rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) { unsigned long val = (unsigned long)owner; @@ -58,13 +58,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) WRITE_ONCE(lock->owner, (struct task_struct *)val); } -static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); } -static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -139,14 +139,14 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) * set up. */ #ifndef CONFIG_DEBUG_RT_MUTEXES -static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, +static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new) { return try_cmpxchg_acquire(&lock->owner, &old, new); } -static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, +static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new) { @@ -158,7 +158,7 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, * all future threads that attempt to [Rmw] the lock to the slowpath. As such * relaxed semantics suffice. 
*/ -static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -174,7 +174,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) * 2) Drop lock->wait_lock * 3) Try to unlock the lock with cmpxchg */ -static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, unsigned long flags) __releases(lock->wait_lock) { @@ -210,7 +210,7 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, } #else -static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, +static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new) { @@ -218,14 +218,14 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock, } -static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock, +static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new) { return false; } -static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); @@ -234,7 +234,7 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) /* * Simple slow path only version: lock->owner is protected by lock->wait_lock. */ -static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, unsigned long flags) __releases(lock->wait_lock) { @@ -295,13 +295,13 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod } static __always_inline void -rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) +rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); } static __always_inline void -rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) +rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->tree_entry)) return; @@ -369,7 +369,7 @@ rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return chwalk == RT_MUTEX_FULL_CHAINWALK; } -static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) +static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p) { return p->pi_blocked_on ? 
p->pi_blocked_on->lock : NULL; } @@ -439,15 +439,15 @@ static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct */ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, enum rtmutex_chainwalk chwalk, - struct rt_mutex *orig_lock, - struct rt_mutex *next_lock, + struct rt_mutex_base *orig_lock, + struct rt_mutex_base *next_lock, struct rt_mutex_waiter *orig_waiter, struct task_struct *top_task) { struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; struct rt_mutex_waiter *prerequeue_top_waiter; int ret = 0, depth = 0; - struct rt_mutex *lock; + struct rt_mutex_base *lock; bool detect_deadlock; bool requeue = true; @@ -795,7 +795,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * callsite called task_blocked_on_lock(), otherwise NULL */ static int __sched -try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) { lockdep_assert_held(&lock->wait_lock); @@ -913,14 +913,14 @@ takeit: * * This must be called with lock->wait_lock held and interrupts disabled */ -static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock, +static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, enum rtmutex_chainwalk chwalk) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - struct rt_mutex *next_lock; + struct rt_mutex_base *next_lock; int chain_walk = 0, res; lockdep_assert_held(&lock->wait_lock); @@ -1003,7 +1003,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock, * Called with lock->wait_lock held and interrupts disabled. */ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, - struct rt_mutex *lock) + struct rt_mutex_base *lock) { struct rt_mutex_waiter *waiter; @@ -1052,12 +1052,12 @@ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Must be called with lock->wait_lock held and interrupts disabled. I must * have just failed to try_to_take_rt_mutex(). 
*/ -static void __sched remove_waiter(struct rt_mutex *lock, +static void __sched remove_waiter(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); - struct rt_mutex *next_lock; + struct rt_mutex_base *next_lock; lockdep_assert_held(&lock->wait_lock); @@ -1115,7 +1115,8 @@ static void __sched remove_waiter(struct rt_mutex *lock, * * Must be called with lock->wait_lock held and interrupts disabled */ -static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, +static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, + unsigned int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) { @@ -1170,7 +1171,8 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Slow path lock function: */ -static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, +static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, + unsigned int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk) { @@ -1231,7 +1233,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state, return ret; } -static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, +static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, unsigned int state) { if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) @@ -1240,7 +1242,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); } -static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) { int ret = try_to_take_rt_mutex(lock, current, NULL); @@ -1256,7 +1258,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock) /* * Slow path try-lock function: */ -static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) { unsigned long flags; int ret; @@ -1282,7 +1284,7 @@ static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) return ret; } -static __always_inline int __rt_mutex_trylock(struct rt_mutex *lock) +static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) { if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 1; @@ -1293,7 +1295,7 @@ static __always_inline int __rt_mutex_trylock(struct rt_mutex *lock) /* * Slow path to release a rt-mutex. */ -static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) +static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) { DEFINE_WAKE_Q(wake_q); unsigned long flags; @@ -1354,7 +1356,7 @@ static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) rt_mutex_postunlock(&wake_q); } -static __always_inline void __rt_mutex_unlock(struct rt_mutex *lock) +static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) { if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) return; -- cgit v1.2.3 From ebbdc41e90ffce8b6bb3cbba1801ede2dd07a89b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:00 +0200 Subject: locking/rtmutex: Provide rt_mutex_slowlock_locked() Split the inner workings of rt_mutex_slowlock() out into a separate function, which can be reused by the upcoming RT lock substitutions, e.g. for rw_semaphores. 
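The shape being introduced, an inner helper that assumes lock->wait_lock is already held plus a thin wrapper that acquires and releases it, is sketched below in stand-alone form; the names are hypothetical and a pthread mutex stands in for the raw wait_lock. A caller such as an rwsem substitution that already holds the internal lock would call the *_locked() variant directly.

/* Minimal sketch of the "_locked() worker + locking wrapper" split. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static int resource;

/* Inner worker: caller must hold wait_lock; reusable by other lock types. */
static int demo_slowpath_locked(int want)
{
	resource += want;
	return resource;
}

/* Outer wrapper: does the lock/unlock dance around the inner worker. */
static int demo_slowpath(int want)
{
	int ret;

	pthread_mutex_lock(&wait_lock);
	ret = demo_slowpath_locked(want);
	pthread_mutex_unlock(&wait_lock);
	return ret;
}

int main(void)
{
	printf("%d\n", demo_slowpath(1));
	return 0;
}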
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211302.841971086@linutronix.de --- kernel/locking/rtmutex.c | 100 +++++++++++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 42 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b31f6cbe3a30..3d0b29cb5e63 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock, } /** - * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE * or TASK_UNINTERRUPTIBLE) @@ -1115,10 +1115,10 @@ static void __sched remove_waiter(struct rt_mutex_base *lock, * * Must be called with lock->wait_lock held and interrupts disabled */ -static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, - unsigned int state, - struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) +static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, + unsigned int state, + struct hrtimer_sleeper *timeout, + struct rt_mutex_waiter *waiter) { int ret = 0; @@ -1168,52 +1168,37 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, } } -/* - * Slow path lock function: +/** + * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held + * @lock: The rtmutex to block lock + * @state: The task state for sleeping + * @chwalk: Indicator whether full or partial chainwalk is requested + * @waiter: Initializer waiter for blocking */ -static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, - unsigned int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk) +static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, + unsigned int state, + enum rtmutex_chainwalk chwalk, + struct rt_mutex_waiter *waiter) { - struct rt_mutex_waiter waiter; - unsigned long flags; - int ret = 0; - - rt_mutex_init_waiter(&waiter); + int ret; - /* - * Technically we could use raw_spin_[un]lock_irq() here, but this can - * be called in early boot if the cmpxchg() fast path is disabled - * (debug, no architecture support). In this case we will acquire the - * rtmutex with lock->wait_lock held. But we cannot unconditionally - * enable interrupts in that early boot case. So we need to use the - * irqsave/restore variants. 
- */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + lockdep_assert_held(&lock->wait_lock); /* Try to acquire the lock again: */ - if (try_to_take_rt_mutex(lock, current, NULL)) { - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + if (try_to_take_rt_mutex(lock, current, NULL)) return 0; - } set_current_state(state); - /* Setup the timer, when timeout != NULL */ - if (unlikely(timeout)) - hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); - - ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); + ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); if (likely(!ret)) - /* sleep on the mutex */ - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); + ret = rt_mutex_slowlock_block(lock, state, NULL, waiter); if (unlikely(ret)) { __set_current_state(TASK_RUNNING); - remove_waiter(lock, &waiter); - rt_mutex_handle_deadlock(ret, chwalk, &waiter); + remove_waiter(lock, waiter); + rt_mutex_handle_deadlock(ret, chwalk, waiter); } /* @@ -1221,14 +1206,45 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, * unconditionally. We might have to fix that up. */ fixup_rt_mutex_waiters(lock); + return ret; +} - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); +static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, + unsigned int state) +{ + struct rt_mutex_waiter waiter; + int ret; + + rt_mutex_init_waiter(&waiter); - /* Remove pending timer: */ - if (unlikely(timeout)) - hrtimer_cancel(&timeout->timer); + ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter); debug_rt_mutex_free_waiter(&waiter); + return ret; +} + +/* + * rt_mutex_slowlock - Locking slowpath invoked when fast path fails + * @lock: The rtmutex to block lock + * @state: The task state for sleeping + */ +static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, + unsigned int state) +{ + unsigned long flags; + int ret; + + /* + * Technically we could use raw_spin_[un]lock_irq() here, but this can + * be called in early boot if the cmpxchg() fast path is disabled + * (debug, no architecture support). In this case we will acquire the + * rtmutex with lock->wait_lock held. But we cannot unconditionally + * enable interrupts in that early boot case. So we need to use the + * irqsave/restore variants. + */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + ret = __rt_mutex_slowlock_locked(lock, state); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); return ret; } @@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + return rt_mutex_slowlock(lock, state); } static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) -- cgit v1.2.3 From c014ef69b3acdb8c9e7fc412e96944f4d5c36fa0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:06 +0200 Subject: locking/rtmutex: Add wake_state to rt_mutex_waiter Regular sleeping locks like mutexes, rtmutexes and rw_semaphores are always entering and leaving a blocking section with task state == TASK_RUNNING. On a non-RT kernel spinlocks and rwlocks never affect the task state, but on RT kernels these locks are converted to rtmutex based 'sleeping' locks. So in case of contention the task goes to block, which requires to carefully preserve the task state, and restore it after acquiring the lock taking regular wakeups for the task into account, which happened while the task was blocked. 
This state preserving is achieved by having a separate task state for blocking on a RT spin/rwlock and a saved_state field in task_struct along with careful handling of these wakeup scenarios in try_to_wake_up(). To avoid conditionals in the rtmutex code, store the wake state which has to be used for waking a lock waiter in rt_mutex_waiter which allows to handle the regular and RT spin/rwlocks by handing it to wake_up_state(). Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.079800739@linutronix.de --- kernel/locking/rtmutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 3d0b29cb5e63..c13b9b849a4b 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -692,7 +692,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * to get the lock. */ if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) - wake_up_process(rt_mutex_top_waiter(lock)->task); + wake_up_state(waiter->task, waiter->wake_state); raw_spin_unlock_irq(&lock->wait_lock); return 0; } -- cgit v1.2.3 From b576e640ce5e22673e12949cf14ae3cb18d9b859 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:08 +0200 Subject: locking/rtmutex: Provide rt_wake_q_head and helpers To handle the difference between wakeups for regular sleeping locks (mutex, rtmutex, rw_semaphore) and the wakeups for 'sleeping' spin/rwlocks on PREEMPT_RT enabled kernels correctly, it is required to provide a wake_q_head construct which allows to keep them separate. Provide a wrapper around wake_q_head and the required helpers, which will be extended with the state handling later. No functional change. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.139337655@linutronix.de --- kernel/locking/rtmutex.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index c13b9b849a4b..35f7685d7460 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -347,6 +347,21 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) rt_mutex_setprio(p, pi_task); } +/* RT mutex specific wake_q wrappers */ +static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, + struct rt_mutex_waiter *w) +{ + wake_q_add(&wqh->head, w->task); +} + +static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) +{ + wake_up_q(&wqh->head); + + /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ + preempt_enable(); +} + /* * Deadlock detection is conditional: * -- cgit v1.2.3 From 7980aa397cc0968ea3ffee7a985c31c92ad84f81 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:09 +0200 Subject: locking/rtmutex: Use rt_mutex_wake_q_head Prepare for the required state aware handling of waiter wakeups via wake_q and switch the rtmutex code over to the rtmutex specific wrapper. No functional change. 
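A rough stand-alone sketch of the wrapper idea follows, with invented names and a fixed array standing in for the real wake queue: the rtmutex code only talks to the wrapper head and its two helpers, so per-unlock state (such as the rtlock waiter slot added later in this series) can be attached to the head without touching the generic wake_q machinery.

/* Sketch of a lock-specific wake queue head; all names are made up. */
#include <stdio.h>

struct demo_wake_q_head {		/* stands in for struct wake_q_head */
	int tasks[8];
	int nr;
};

struct demo_rt_wake_q_head {		/* stands in for struct rt_wake_q_head */
	struct demo_wake_q_head head;
	/* later: a dedicated slot for the rtlock waiter goes here */
};

static void demo_rt_wake_q_add(struct demo_rt_wake_q_head *wqh, int task)
{
	wqh->head.tasks[wqh->head.nr++] = task;	/* no bounds check, sketch only */
}

static void demo_rt_wake_up_q(struct demo_rt_wake_q_head *wqh)
{
	for (int i = 0; i < wqh->head.nr; i++)
		printf("wake task %d\n", wqh->head.tasks[i]);
	wqh->head.nr = 0;
	/* in the kernel this is also where the preempt_enable() lives that
	 * pairs with the preempt_disable() done when the waiter was queued */
}

int main(void)
{
	struct demo_rt_wake_q_head wqh = { .head.nr = 0 };

	demo_rt_wake_q_add(&wqh, 42);
	demo_rt_wake_up_q(&wqh);
	return 0;
}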
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.197113263@linutronix.de --- kernel/locking/rtmutex.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 35f7685d7460..5f0d0725ca32 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1017,7 +1017,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, * * Called with lock->wait_lock held and interrupts disabled. */ -static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, +static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, struct rt_mutex_base *lock) { struct rt_mutex_waiter *waiter; @@ -1054,10 +1054,10 @@ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, * deboost but before waking our donor task, hence the preempt_disable() * before unlock. * - * Pairs with preempt_enable() in rt_mutex_postunlock(); + * Pairs with preempt_enable() in rt_mutex_wake_up_q(); */ preempt_disable(); - wake_q_add(wake_q, waiter->task); + rt_mutex_wake_q_add(wqh, waiter); raw_spin_unlock(¤t->pi_lock); } @@ -1328,7 +1328,7 @@ static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) */ static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) { - DEFINE_WAKE_Q(wake_q); + DEFINE_RT_WAKE_Q(wqh); unsigned long flags; /* irqsave required to support early boot calls */ @@ -1381,10 +1381,10 @@ static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) * * Queue the next waiter for wakeup once we release the wait_lock. */ - mark_wakeup_next_waiter(&wake_q, lock); + mark_wakeup_next_waiter(&wqh, lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - rt_mutex_postunlock(&wake_q); + rt_mutex_wake_up_q(&wqh); } static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) -- cgit v1.2.3 From 456cfbc65cd072f4f53936ee5a37eb1447a7d3ba Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:11 +0200 Subject: locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks Add an rtlock_task pointer to rt_mutex_wake_q, which allows to handle the RT specific wakeup for spin/rwlock waiters. The pointer is just consuming 4/8 bytes on the stack so it is provided unconditionaly to avoid #ifdeffery all over the place. This cannot use a regular wake_q, because a task can have concurrent wakeups which would make it miss either lock or the regular wakeups, depending on what gets queued first, unless task struct gains a separate wake_q_node for this, which would be overkill, because there can only be a single task which gets woken up in the spin/rw_lock unlock path. No functional change for non-RT enabled kernels. 
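How the unlock path uses such a head can be sketched in userspace as well; the names below are hypothetical and a pthread mutex stands in for wait_lock. The waiter to wake is recorded while the internal lock is held, but the wakeup itself only happens after that lock has been dropped, which mirrors the mark_wakeup_next_waiter() / rt_mutex_wake_up_q() pairing in the patch.

/* Sketch of "queue the waiter under the lock, wake it after dropping it". */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_wqh {
	int task;
	int queued;
};

static void demo_mark_wakeup_next_waiter(struct demo_wqh *wqh, int task)
{
	/* caller holds wait_lock while picking the top waiter */
	wqh->task = task;
	wqh->queued = 1;
}

static void demo_wake_up_q(struct demo_wqh *wqh)
{
	if (wqh->queued)
		printf("waking task %d after wait_lock was dropped\n", wqh->task);
}

static void demo_slowunlock(void)
{
	struct demo_wqh wqh = { .queued = 0 };

	pthread_mutex_lock(&wait_lock);
	demo_mark_wakeup_next_waiter(&wqh, 7);
	pthread_mutex_unlock(&wait_lock);

	demo_wake_up_q(&wqh);
}

int main(void)
{
	demo_slowunlock();
	return 0;
}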
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.253614678@linutronix.de --- kernel/locking/rtmutex.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 5f0d0725ca32..8b0d38dc4147 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -351,12 +351,26 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, struct rt_mutex_waiter *w) { - wake_q_add(&wqh->head, w->task); + if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) { + if (IS_ENABLED(CONFIG_PROVE_LOCKING)) + WARN_ON_ONCE(wqh->rtlock_task); + get_task_struct(w->task); + wqh->rtlock_task = w->task; + } else { + wake_q_add(&wqh->head, w->task); + } } static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) { - wake_up_q(&wqh->head); + if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { + wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); + put_task_struct(wqh->rtlock_task); + wqh->rtlock_task = NULL; + } + + if (!wake_q_empty(&wqh->head)) + wake_up_q(&wqh->head); /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ preempt_enable(); -- cgit v1.2.3 From e17ba59b7e8e1f67e36d8fcc46daa13370efcf11 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:12 +0200 Subject: locking/rtmutex: Guard regular sleeping locks specific functions Guard the regular sleeping lock specific functionality, which is used for rtmutex on non-RT enabled kernels and for mutex, rtmutex and semaphores on RT enabled kernels so the code can be reused for the RT specific implementation of spinlocks and rwlocks in a different compilation unit. No functional change. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.311535693@linutronix.de --- kernel/locking/rtmutex.c | 254 ++++++++++++++++++++++++----------------------- 1 file changed, 131 insertions(+), 123 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 8b0d38dc4147..949781aa54b1 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1075,10 +1075,139 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, raw_spin_unlock(¤t->pi_lock); } +static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) +{ + int ret = try_to_take_rt_mutex(lock, current, NULL); + + /* + * try_to_take_rt_mutex() sets the lock waiters bit + * unconditionally. Clean this up. + */ + fixup_rt_mutex_waiters(lock); + + return ret; +} + +/* + * Slow path try-lock function: + */ +static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) +{ + unsigned long flags; + int ret; + + /* + * If the lock already has an owner we fail to get the lock. + * This can be done without taking the @lock->wait_lock as + * it is only being read, and this is a trylock anyway. + */ + if (rt_mutex_owner(lock)) + return 0; + + /* + * The mutex has currently no owner. Lock the wait lock and try to + * acquire the lock. We use irqsave here to support early boot calls. 
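The dedicated slot next to the normal queue can be illustrated with the following stand-alone sketch, using invented names and plain ints instead of task pointers: sleeping-lock waiters still go through the shared queue, while the single spin/rwlock waiter an unlock can produce is parked in its own field, so a concurrent regular wakeup cannot compete for the task's one queue node.

/* Sketch of the rtlock_task slot beside the regular wake queue. */
#include <stdbool.h>
#include <stdio.h>

struct demo_rt_wake_q_head {
	int sleepers[8];		/* stands in for the wake_q_head */
	int nr_sleepers;
	int rtlock_task;		/* stands in for the rtlock_task pointer */
	bool have_rtlock_task;
};

static void demo_wake_q_add(struct demo_rt_wake_q_head *wqh, int task,
			    bool rtlock_wait)
{
	if (rtlock_wait) {
		/* at most one such waiter per unlock, a single slot suffices */
		wqh->rtlock_task = task;
		wqh->have_rtlock_task = true;
	} else {
		wqh->sleepers[wqh->nr_sleepers++] = task;
	}
}

static void demo_wake_up_q(struct demo_rt_wake_q_head *wqh)
{
	if (wqh->have_rtlock_task) {
		printf("wake rtlock waiter %d in its rtlock wait state\n",
		       wqh->rtlock_task);
		wqh->have_rtlock_task = false;
	}
	for (int i = 0; i < wqh->nr_sleepers; i++)
		printf("wake sleeping-lock waiter %d\n", wqh->sleepers[i]);
	wqh->nr_sleepers = 0;
}

int main(void)
{
	struct demo_rt_wake_q_head wqh = { .nr_sleepers = 0 };

	demo_wake_q_add(&wqh, 11, true);
	demo_wake_q_add(&wqh, 12, false);
	demo_wake_up_q(&wqh);
	return 0;
}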
+ */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + ret = __rt_mutex_slowtrylock(lock); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + return ret; +} + +static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) +{ + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; + + return rt_mutex_slowtrylock(lock); +} + +/* + * Slow path to release a rt-mutex. + */ +static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) +{ + DEFINE_RT_WAKE_Q(wqh); + unsigned long flags; + + /* irqsave required to support early boot calls */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + debug_rt_mutex_unlock(lock); + + /* + * We must be careful here if the fast path is enabled. If we + * have no waiters queued we cannot set owner to NULL here + * because of: + * + * foo->lock->owner = NULL; + * rtmutex_lock(foo->lock); <- fast path + * free = atomic_dec_and_test(foo->refcnt); + * rtmutex_unlock(foo->lock); <- fast path + * if (free) + * kfree(foo); + * raw_spin_unlock(foo->lock->wait_lock); + * + * So for the fastpath enabled kernel: + * + * Nothing can set the waiters bit as long as we hold + * lock->wait_lock. So we do the following sequence: + * + * owner = rt_mutex_owner(lock); + * clear_rt_mutex_waiters(lock); + * raw_spin_unlock(&lock->wait_lock); + * if (cmpxchg(&lock->owner, owner, 0) == owner) + * return; + * goto retry; + * + * The fastpath disabled variant is simple as all access to + * lock->owner is serialized by lock->wait_lock: + * + * lock->owner = NULL; + * raw_spin_unlock(&lock->wait_lock); + */ + while (!rt_mutex_has_waiters(lock)) { + /* Drops lock->wait_lock ! */ + if (unlock_rt_mutex_safe(lock, flags) == true) + return; + /* Relock the rtmutex and try again */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + } + + /* + * The wakeup next waiter path does not suffer from the above + * race. See the comments there. + * + * Queue the next waiter for wakeup once we release the wait_lock. + */ + mark_wakeup_next_waiter(&wqh, lock); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + rt_mutex_wake_up_q(&wqh); +} + +static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) +{ + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + + rt_mutex_slowunlock(lock); +} + +#ifdef RT_MUTEX_BUILD_MUTEX +/* + * Functions required for: + * - rtmutex, futex on all kernels + * - mutex and rwsem substitutions on RT kernels + */ + /* * Remove a waiter from a lock and give up * - * Must be called with lock->wait_lock held and interrupts disabled. I must + * Must be called with lock->wait_lock held and interrupts disabled. It must * have just failed to try_to_take_rt_mutex(). */ static void __sched remove_waiter(struct rt_mutex_base *lock, @@ -1286,125 +1415,4 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, return rt_mutex_slowlock(lock, state); } - -static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) -{ - int ret = try_to_take_rt_mutex(lock, current, NULL); - - /* - * try_to_take_rt_mutex() sets the lock waiters bit - * unconditionally. Clean this up. - */ - fixup_rt_mutex_waiters(lock); - - return ret; -} - -/* - * Slow path try-lock function: - */ -static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) -{ - unsigned long flags; - int ret; - - /* - * If the lock already has an owner we fail to get the lock. - * This can be done without taking the @lock->wait_lock as - * it is only being read, and this is a trylock anyway. 
- */ - if (rt_mutex_owner(lock)) - return 0; - - /* - * The mutex has currently no owner. Lock the wait lock and try to - * acquire the lock. We use irqsave here to support early boot calls. - */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); - - ret = __rt_mutex_slowtrylock(lock); - - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - return ret; -} - -static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) -{ - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 1; - - return rt_mutex_slowtrylock(lock); -} - -/* - * Slow path to release a rt-mutex. - */ -static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) -{ - DEFINE_RT_WAKE_Q(wqh); - unsigned long flags; - - /* irqsave required to support early boot calls */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); - - debug_rt_mutex_unlock(lock); - - /* - * We must be careful here if the fast path is enabled. If we - * have no waiters queued we cannot set owner to NULL here - * because of: - * - * foo->lock->owner = NULL; - * rtmutex_lock(foo->lock); <- fast path - * free = atomic_dec_and_test(foo->refcnt); - * rtmutex_unlock(foo->lock); <- fast path - * if (free) - * kfree(foo); - * raw_spin_unlock(foo->lock->wait_lock); - * - * So for the fastpath enabled kernel: - * - * Nothing can set the waiters bit as long as we hold - * lock->wait_lock. So we do the following sequence: - * - * owner = rt_mutex_owner(lock); - * clear_rt_mutex_waiters(lock); - * raw_spin_unlock(&lock->wait_lock); - * if (cmpxchg(&lock->owner, owner, 0) == owner) - * return; - * goto retry; - * - * The fastpath disabled variant is simple as all access to - * lock->owner is serialized by lock->wait_lock: - * - * lock->owner = NULL; - * raw_spin_unlock(&lock->wait_lock); - */ - while (!rt_mutex_has_waiters(lock)) { - /* Drops lock->wait_lock ! */ - if (unlock_rt_mutex_safe(lock, flags) == true) - return; - /* Relock the rtmutex and try again */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); - } - - /* - * The wakeup next waiter path does not suffer from the above - * race. See the comments there. - * - * Queue the next waiter for wakeup once we release the wait_lock. - */ - mark_wakeup_next_waiter(&wqh, lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - rt_mutex_wake_up_q(&wqh); -} - -static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) -{ - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) - return; - - rt_mutex_slowunlock(lock); -} +#endif /* RT_MUTEX_BUILD_MUTEX */ -- cgit v1.2.3 From 1c143c4b65da09081d644110e619decc49c9dee4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 15 Aug 2021 23:28:25 +0200 Subject: locking/rtmutex: Provide the spin/rwlock core lock function A simplified version of the rtmutex slowlock function, which neither handles signals nor timeouts, and is careful about preserving the state of the blocked task across the lock operation. 
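A very rough userspace approximation of that slowpath shape is sketched below; the names are hypothetical, sched_yield() stands in for schedule_rtlock(), an atomic int stands in for the owner field and a plain int for the task state. The structure is the interesting part: save the current state, block and retry in a dedicated wait state, then restore whatever state the task had before it hit the contended lock.

/* Sketch of the save-state / block-and-retry / restore-state structure. */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_TASK_RUNNING	0
#define DEMO_TASK_RTLOCK_WAIT	1

static _Atomic int owner;			/* 0 == unlocked */
static int task_state = DEMO_TASK_RUNNING;	/* stands in for the task state */
static int saved_state;

static int demo_try_to_take(int me)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&owner, &expected, me);
}

static void demo_rtlock_slowlock(int me)
{
	/* current_save_and_set_rtlock_wait_state() */
	saved_state = task_state;
	task_state = DEMO_TASK_RTLOCK_WAIT;

	while (!demo_try_to_take(me)) {
		sched_yield();				/* schedule_rtlock() */
		task_state = DEMO_TASK_RTLOCK_WAIT;	/* re-set after wakeup */
	}

	/* current_restore_rtlock_saved_state() */
	task_state = saved_state;
}

int main(void)
{
	demo_rtlock_slowlock(1);
	printf("owner %d, state %d\n", atomic_load(&owner), task_state);
	return 0;
}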
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.770228446@linutronix.de --- kernel/locking/rtmutex.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 949781aa54b1..951bef073891 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, return rt_mutex_slowlock(lock, state); } #endif /* RT_MUTEX_BUILD_MUTEX */ + +#ifdef RT_MUTEX_BUILD_SPINLOCKS +/* + * Functions required for spin/rw_lock substitution on RT kernels + */ + +/** + * rtlock_slowlock_locked - Slow path lock acquisition for RT locks + * @lock: The underlying RT mutex + */ +static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) +{ + struct rt_mutex_waiter waiter; + + lockdep_assert_held(&lock->wait_lock); + + if (try_to_take_rt_mutex(lock, current, NULL)) + return; + + rt_mutex_init_rtlock_waiter(&waiter); + + /* Save current state and set state to TASK_RTLOCK_WAIT */ + current_save_and_set_rtlock_wait_state(); + + task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK); + + for (;;) { + /* Try to acquire the lock again */ + if (try_to_take_rt_mutex(lock, current, &waiter)) + break; + + raw_spin_unlock_irq(&lock->wait_lock); + + schedule_rtlock(); + + raw_spin_lock_irq(&lock->wait_lock); + set_current_state(TASK_RTLOCK_WAIT); + } + + /* Restore the task state */ + current_restore_rtlock_saved_state(); + + /* + * try_to_take_rt_mutex() sets the waiter bit unconditionally. + * We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + debug_rt_mutex_free_waiter(&waiter); +} + +static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + rtlock_slowlock_locked(lock); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); +} + +#endif /* RT_MUTEX_BUILD_SPINLOCKS */ -- cgit v1.2.3 From 715f7f9ece4685157bb59560f6c612340d730ab4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 15 Aug 2021 23:28:30 +0200 Subject: locking/rtmutex: Squash !RT tasks to DEFAULT_PRIO Ensure all !RT tasks have the same prio such that they end up in FIFO order and aren't split up according to nice level. The reason why nice levels were taken into account so far is historical. In the early days of the rtmutex code it was done to give the PI boosting and deboosting a larger coverage. 
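The squash itself is small enough to show stand-alone; the sketch below uses constants matching the kernel's usual values (RT priorities below 100, DEFAULT_PRIO of 120). Every non-RT waiter reports the same priority, so among themselves the waiter tree degenerates to enqueue (FIFO) order, while RT waiters still sort ahead of all of them.

/* Sketch of squashing non-RT priorities to one value for waiter ordering. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_RT_PRIO	100
#define DEMO_DEFAULT_PRIO	120

static bool demo_rt_prio(int prio)
{
	return prio < DEMO_MAX_RT_PRIO;
}

static int demo_waiter_prio(int task_prio)
{
	return demo_rt_prio(task_prio) ? task_prio : DEMO_DEFAULT_PRIO;
}

int main(void)
{
	/* two non-RT tasks with different nice levels end up equal ... */
	printf("nice -10 -> %d, nice +10 -> %d\n",
	       demo_waiter_prio(110), demo_waiter_prio(130));
	/* ... while an RT task still sorts ahead of both */
	printf("RT prio 10 -> %d\n", demo_waiter_prio(10));
	return 0;
}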
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211303.938676930@linutronix.de --- kernel/locking/rtmutex.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 951bef073891..ac8fb2f9c6f2 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -244,11 +244,28 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, } #endif +static __always_inline int __waiter_prio(struct task_struct *task) +{ + int prio = task->prio; + + if (!rt_prio(prio)) + return DEFAULT_PRIO; + + return prio; +} + +static __always_inline void +waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) +{ + waiter->prio = __waiter_prio(task); + waiter->deadline = task->dl.deadline; +} + /* * Only use with rt_mutex_waiter_{less,equal}() */ #define task_to_waiter(p) \ - &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } + &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, struct rt_mutex_waiter *right) @@ -698,8 +715,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * serializes all pi_waiters access and rb_erase() does not care about * the values of the node being removed. */ - waiter->prio = task->prio; - waiter->deadline = task->dl.deadline; + waiter_update_prio(waiter, task); rt_mutex_enqueue(lock, waiter); @@ -969,8 +985,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, raw_spin_lock(&task->pi_lock); waiter->task = task; waiter->lock = lock; - waiter->prio = task->prio; - waiter->deadline = task->dl.deadline; + waiter_update_prio(waiter, task); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) -- cgit v1.2.3 From add461325ec5bc39aa619a1bfcde7245e5f31ac7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sun, 15 Aug 2021 23:28:58 +0200 Subject: locking/rtmutex: Extend the rtmutex core to support ww_mutex Add a ww acquire context pointer to the waiter and various functions and add the ww_mutex related invocations to the proper spots in the locking code, similar to the mutex based variant. 
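One detail worth a stand-alone illustration is the new tie-break in the waiter ordering: when two waiters have equal rtmutex priority and both carry a ww acquire context, the context with the older stamp sorts first. The sketch below uses a hypothetical waiter struct and unsigned stamps; the signed subtraction keeps the comparison well defined if the stamp counter ever wraps.

/* Sketch of priority ordering with a ww acquire-context stamp tie-break. */
#include <stdbool.h>
#include <stdio.h>

struct demo_waiter {
	int prio;			/* lower value == higher priority */
	bool has_ctx;			/* waiter has a ww acquire context */
	unsigned long stamp;		/* acquire-context stamp */
};

static bool demo_waiter_less(const struct demo_waiter *a,
			     const struct demo_waiter *b)
{
	if (a->prio < b->prio)
		return true;
	if (b->prio < a->prio)
		return false;

	/* equal priority: fall back to the ww acquire order, if any */
	if (a->has_ctx) {
		if (!b->has_ctx)
			return true;
		return (long)(a->stamp - b->stamp) < 0;	/* older stamp wins */
	}
	return false;
}

int main(void)
{
	struct demo_waiter older = { .prio = 120, .has_ctx = true, .stamp = 5 };
	struct demo_waiter newer = { .prio = 120, .has_ctx = true, .stamp = 9 };

	printf("older context first: %d\n", demo_waiter_less(&older, &newer));
	return 0;
}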
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211304.966139174@linutronix.de --- kernel/locking/rtmutex.c | 121 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 110 insertions(+), 11 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index ac8fb2f9c6f2..af7e3af4d313 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -17,9 +17,44 @@ #include #include #include +#include #include "rtmutex_common.h" +#ifndef WW_RT +# define build_ww_mutex() (false) +# define ww_container_of(rtm) NULL + +static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, + struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + return 0; +} + +static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ +} + +static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ +} + +static inline int __ww_mutex_check_kill(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct ww_acquire_ctx *ww_ctx) +{ + return 0; +} + +#else +# define build_ww_mutex() (true) +# define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base) +# include "ww_mutex.h" +#endif + /* * lock->owner state tracking: * @@ -308,7 +343,28 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) { - return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); + struct rt_mutex_waiter *aw = __node_2_waiter(a); + struct rt_mutex_waiter *bw = __node_2_waiter(b); + + if (rt_mutex_waiter_less(aw, bw)) + return 1; + + if (!build_ww_mutex()) + return 0; + + if (rt_mutex_waiter_less(bw, aw)) + return 0; + + /* NOTE: relies on waiter->ww_ctx being set before insertion */ + if (aw->ww_ctx) { + if (!bw->ww_ctx) + return 1; + + return (signed long)(aw->ww_ctx->stamp - + bw->ww_ctx->stamp) < 0; + } + + return 0; } static __always_inline void @@ -961,6 +1017,7 @@ takeit: static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, + struct ww_acquire_ctx *ww_ctx, enum rtmutex_chainwalk chwalk) { struct task_struct *owner = rt_mutex_owner(lock); @@ -996,6 +1053,16 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, raw_spin_unlock(&task->pi_lock); + if (build_ww_mutex() && ww_ctx) { + struct rt_mutex *rtm; + + /* Check whether the waiter should back out immediately */ + rtm = container_of(lock, struct rt_mutex, rtmutex); + res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); + if (res) + return res; + } + if (!owner) return 0; @@ -1281,6 +1348,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock, /** * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take + * @ww_ctx: WW mutex context pointer * @state: the state the task should block in (TASK_INTERRUPTIBLE * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none @@ -1289,10 +1357,12 @@ static void __sched remove_waiter(struct rt_mutex_base *lock, * Must be called with lock->wait_lock held and interrupts disabled */ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, + struct ww_acquire_ctx *ww_ctx, unsigned int state, struct hrtimer_sleeper *timeout, struct 
rt_mutex_waiter *waiter) { + struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); int ret = 0; for (;;) { @@ -1309,6 +1379,12 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, break; } + if (build_ww_mutex() && ww_ctx) { + ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); + if (ret) + break; + } + raw_spin_unlock_irq(&lock->wait_lock); schedule(); @@ -1331,6 +1407,9 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, if (res != -EDEADLOCK || detect_deadlock) return; + if (build_ww_mutex() && w->ww_ctx) + return; + /* * Yell loudly and stop the task right here. */ @@ -1344,31 +1423,46 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, /** * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held * @lock: The rtmutex to block lock + * @ww_ctx: WW mutex context pointer * @state: The task state for sleeping * @chwalk: Indicator whether full or partial chainwalk is requested * @waiter: Initializer waiter for blocking */ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, + struct ww_acquire_ctx *ww_ctx, unsigned int state, enum rtmutex_chainwalk chwalk, struct rt_mutex_waiter *waiter) { + struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); + struct ww_mutex *ww = ww_container_of(rtm); int ret; lockdep_assert_held(&lock->wait_lock); /* Try to acquire the lock again: */ - if (try_to_take_rt_mutex(lock, current, NULL)) + if (try_to_take_rt_mutex(lock, current, NULL)) { + if (build_ww_mutex() && ww_ctx) { + __ww_mutex_check_waiters(rtm, ww_ctx); + ww_mutex_lock_acquired(ww, ww_ctx); + } return 0; + } set_current_state(state); - ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); - + ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); if (likely(!ret)) - ret = rt_mutex_slowlock_block(lock, state, NULL, waiter); - - if (unlikely(ret)) { + ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); + + if (likely(!ret)) { + /* acquired the lock */ + if (build_ww_mutex() && ww_ctx) { + if (!ww_ctx->is_wait_die) + __ww_mutex_check_waiters(rtm, ww_ctx); + ww_mutex_lock_acquired(ww, ww_ctx); + } + } else { __set_current_state(TASK_RUNNING); remove_waiter(lock, waiter); rt_mutex_handle_deadlock(ret, chwalk, waiter); @@ -1383,14 +1477,17 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, } static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, + struct ww_acquire_ctx *ww_ctx, unsigned int state) { struct rt_mutex_waiter waiter; int ret; rt_mutex_init_waiter(&waiter); + waiter.ww_ctx = ww_ctx; - ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter); + ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, + &waiter); debug_rt_mutex_free_waiter(&waiter); return ret; @@ -1399,9 +1496,11 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, /* * rt_mutex_slowlock - Locking slowpath invoked when fast path fails * @lock: The rtmutex to block lock + * @ww_ctx: WW mutex context pointer * @state: The task state for sleeping */ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, + struct ww_acquire_ctx *ww_ctx, unsigned int state) { unsigned long flags; @@ -1416,7 +1515,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, * irqsave/restore variants. 
*/ raw_spin_lock_irqsave(&lock->wait_lock, flags); - ret = __rt_mutex_slowlock_locked(lock, state); + ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); return ret; @@ -1428,7 +1527,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - return rt_mutex_slowlock(lock, state); + return rt_mutex_slowlock(lock, NULL, state); } #endif /* RT_MUTEX_BUILD_MUTEX */ @@ -1455,7 +1554,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) /* Save current state and set state to TASK_RTLOCK_WAIT */ current_save_and_set_rtlock_wait_state(); - task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK); + task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); for (;;) { /* Try to acquire the lock again */ -- cgit v1.2.3 From 48eb3f4fcfd35495a8357459aa6fe437aa430b00 Mon Sep 17 00:00:00 2001 From: Gregory Haskins Date: Sun, 15 Aug 2021 23:29:23 +0200 Subject: locking/rtmutex: Implement equal priority lock stealing The current logic only allows lock stealing to occur if the current task is of higher priority than the pending owner. Significant throughput improvements can be gained by allowing the lock stealing to include tasks of equal priority when the contended lock is a spin_lock or a rw_lock and the tasks are not in a RT scheduling task. The assumption was that the system will make faster progress by allowing the task already on the CPU to take the lock rather than waiting for the system to wake up a different task. This does add a degree of unfairness, but in reality no negative side effects have been observed in the many years that this has been used in the RT kernel. [ tglx: Refactored and rewritten several times by Steve Rostedt, Sebastian Siewior and myself ] Signed-off-by: Gregory Haskins Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211305.857240222@linutronix.de --- kernel/locking/rtmutex.c | 52 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 17 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index af7e3af4d313..3eaf636606fd 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -338,6 +338,26 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, return 1; } +static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, + struct rt_mutex_waiter *top_waiter) +{ + if (rt_mutex_waiter_less(waiter, top_waiter)) + return true; + +#ifdef RT_MUTEX_BUILD_SPINLOCKS + /* + * Note that RT tasks are excluded from same priority (lateral) + * steals to prevent the introduction of an unbounded latency. + */ + if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) + return false; + + return rt_mutex_waiter_equal(waiter, top_waiter); +#else + return false; +#endif +} + #define __node_2_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, tree_entry) @@ -932,19 +952,21 @@ try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, * trylock attempt. */ if (waiter) { - /* - * If waiter is not the highest priority waiter of - * @lock, give up. - */ - if (waiter != rt_mutex_top_waiter(lock)) - return 0; + struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); /* - * We can acquire the lock. Remove the waiter from the - * lock waiters tree. 
+ * If waiter is the highest priority waiter of @lock, + * or allowed to steal it, take it over. */ - rt_mutex_dequeue(lock, waiter); - + if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { + /* + * We can acquire the lock. Remove the waiter from the + * lock waiters tree. + */ + rt_mutex_dequeue(lock, waiter); + } else { + return 0; + } } else { /* * If the lock has waiters already we check whether @task is @@ -955,13 +977,9 @@ try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, * not need to be dequeued. */ if (rt_mutex_has_waiters(lock)) { - /* - * If @task->prio is greater than or equal to - * the top waiter priority (kernel view), - * @task lost. - */ - if (!rt_mutex_waiter_less(task_to_waiter(task), - rt_mutex_top_waiter(lock))) + /* Check whether the trylock can steal it. */ + if (!rt_mutex_steal(task_to_waiter(task), + rt_mutex_top_waiter(lock))) return 0; /* -- cgit v1.2.3 From 992caf7f17243d736fc996770bac6566103778f6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sun, 15 Aug 2021 23:29:25 +0200 Subject: locking/rtmutex: Add adaptive spinwait mechanism Going to sleep when locks are contended can be quite inefficient when the contention time is short and the lock owner is running on a different CPU. The MCS mechanism cannot be used because MCS is strictly FIFO ordered while for rtmutex based locks the waiter ordering is priority based. Provide a simple adaptive spinwait mechanism which currently restricts the spinning to the top priority waiter. [ tglx: Provide a contemporary changelog, extended it to all rtmutex based locks and updated it to match the other spin on owner implementations ] Originally-by: Gregory Haskins Signed-off-by: Steven Rostedt Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210815211305.912050691@linutronix.de --- kernel/locking/rtmutex.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 2 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 3eaf636606fd..8aaa352d0c17 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -8,6 +8,11 @@ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen + * Adaptive Spinlocks: + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, + * and Peter Morreale, + * Adaptive Spinlocks simplification: + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt * * See Documentation/locking/rt-mutex-design.rst for details. */ @@ -1297,6 +1302,52 @@ static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) rt_mutex_slowunlock(lock); } +#ifdef CONFIG_SMP +static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *owner) +{ + bool res = true; + + rcu_read_lock(); + for (;;) { + /* If owner changed, trylock again. */ + if (owner != rt_mutex_owner(lock)) + break; + /* + * Ensure that @owner is dereferenced after checking that + * the lock owner still matches @owner. If that fails, + * @owner might point to freed memory. If it still matches, + * the rcu_read_lock() ensures the memory stays valid. 
+ */ + barrier(); + /* + * Stop spinning when: + * - the lock owner has been scheduled out + * - current is not longer the top waiter + * - current is requested to reschedule (redundant + * for CONFIG_PREEMPT_RCU=y) + * - the VCPU on which owner runs is preempted + */ + if (!owner->on_cpu || waiter != rt_mutex_top_waiter(lock) || + need_resched() || vcpu_is_preempted(task_cpu(owner))) { + res = false; + break; + } + cpu_relax(); + } + rcu_read_unlock(); + return res; +} +#else +static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *owner) +{ + return false; +} +#endif + #ifdef RT_MUTEX_BUILD_MUTEX /* * Functions required for: @@ -1381,6 +1432,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); + struct task_struct *owner; int ret = 0; for (;;) { @@ -1403,9 +1455,14 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, break; } + if (waiter == rt_mutex_top_waiter(lock)) + owner = rt_mutex_owner(lock); + else + owner = NULL; raw_spin_unlock_irq(&lock->wait_lock); - schedule(); + if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) + schedule(); raw_spin_lock_irq(&lock->wait_lock); set_current_state(state); @@ -1561,6 +1618,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) { struct rt_mutex_waiter waiter; + struct task_struct *owner; lockdep_assert_held(&lock->wait_lock); @@ -1579,9 +1637,14 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) if (try_to_take_rt_mutex(lock, current, &waiter)) break; + if (&waiter == rt_mutex_top_waiter(lock)) + owner = rt_mutex_owner(lock); + else + owner = NULL; raw_spin_unlock_irq(&lock->wait_lock); - schedule_rtlock(); + if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) + schedule_rtlock(); raw_spin_lock_irq(&lock->wait_lock); set_current_state(TASK_RTLOCK_WAIT); -- cgit v1.2.3 From c3123c431447da99db160264506de9897c003513 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 25 Aug 2021 12:33:12 +0200 Subject: locking/rtmutex: Dont dereference waiter lockless The new rt_mutex_spin_on_owner() loop checks whether the spinning waiter is still the top waiter on the lock by utilizing rt_mutex_top_waiter(), which is broken because that function contains a sanity check which dereferences the top waiter pointer to check whether the waiter belongs to the lock. That's wrong in the lockless spinwait case: CPU 0 CPU 1 rt_mutex_lock(lock) rt_mutex_lock(lock); queue(waiter0) waiter0 == rt_mutex_top_waiter(lock) rt_mutex_spin_on_owner(lock, waiter0) { queue(waiter1) waiter1 == rt_mutex_top_waiter(lock) ... top_waiter = rt_mutex_top_waiter(lock) leftmost = rb_first_cached(&lock->waiters); -> signal dequeue(waiter1) destroy(waiter1) w = rb_entry(leftmost, ....) BUG_ON(w->lock != lock) <- UAF The BUG_ON() is correct for the case where the caller holds lock->wait_lock which guarantees that the leftmost waiter entry cannot vanish. For the lockless spinwait case it's broken. Create a new helper function which avoids the pointer dereference and just compares the leftmost entry pointer with current's waiter pointer to validate that current is still eligible for spinning.
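The helper the fix relies on lives in kernel/locking/rtmutex_common.h and is therefore not visible in the rtmutex.c-only diff below; roughly, it looks like the sketch here. Because rb_entry() is plain pointer arithmetic, the waiter at the leftmost node is never dereferenced, which is what closes the use-after-free window described above.

/* Sketch of the new helper (the actual definition is in rtmutex_common.h) */
static __always_inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
							   struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	/* Pure pointer comparison: no waiter is dereferenced here */
	return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
}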
Fixes: 992caf7f1724 ("locking/rtmutex: Add adaptive spinwait mechanism") Reported-by: Sebastian Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210825102453.981720644@linutronix.de --- kernel/locking/rtmutex.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 8aaa352d0c17..b3c09611ef6a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1329,8 +1329,9 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, * for CONFIG_PREEMPT_RCU=y) * - the VCPU on which owner runs is preempted */ - if (!owner->on_cpu || waiter != rt_mutex_top_waiter(lock) || - need_resched() || vcpu_is_preempted(task_cpu(owner))) { + if (!owner->on_cpu || need_resched() || + !rt_mutex_waiter_is_top_waiter(lock, waiter) || + vcpu_is_preempted(task_cpu(owner))) { res = false; break; } -- cgit v1.2.3 From 37e8abff2bebbf9947d6b784f5c75ed48a717089 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 25 Aug 2021 12:33:14 +0200 Subject: locking/rtmutex: Dequeue waiter on ww_mutex deadlock The rt_mutex based ww_mutex variant queues the new waiter first in the lock's rbtree before evaluating the ww_mutex specific conditions which might decide that the waiter should back out. This check and conditional exit happen before the waiter is enqueued into the PI chain. The failure handling at the call site assumes that the waiter, if it is the topmost waiter on the lock, is queued in the PI chain and then proceeds to adjust the unmodified PI chain, which results in RB tree corruption. Dequeue the waiter from the lock waiter list in the ww_mutex error exit path to prevent this. Fixes: add461325ec5 ("locking/rtmutex: Extend the rtmutex core to support ww_mutex") Reported-by: Sebastian Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20210825102454.042280541@linutronix.de --- kernel/locking/rtmutex.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b3c09611ef6a..c8fe74ef8db9 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1082,8 +1082,13 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, /* Check whether the waiter should back out immediately */ rtm = container_of(lock, struct rt_mutex, rtmutex); res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); - if (res) + if (res) { + raw_spin_lock(&task->pi_lock); + rt_mutex_dequeue(lock, waiter); + task->pi_blocked_on = NULL; + raw_spin_unlock(&task->pi_lock); return res; + } } if (!owner) -- cgit v1.2.3 From 6467822b8cc96e5feda98c7bf5c6329c6a896c91 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 26 Aug 2021 09:36:53 +0200 Subject: locking/rtmutex: Prevent spurious EDEADLK return caused by ww_mutexes rtmutex based ww_mutexes can legitimately create a cycle in the lock graph which can be observed by a blocker which didn't cause the problem: P1: A, ww_A, ww_B P2: ww_B, ww_A P3: A P3 might therefore be trapped in the ww_mutex induced cycle and run into the lock depth limitation of rt_mutex_adjust_prio_chain() which returns -EDEADLK to the caller. Disable the deadlock detection walk when the chain walk observes a ww_mutex to prevent this looping.
[ tglx: Split it apart and added changelog ] Reported-by: Sebastian Siewior Fixes: add461325ec5 ("locking/rtmutex: Extend the rtmutex core to support ww_mutex") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/YSeWjCHoK4v5OcOt@hirez.programming.kicks-ass.net --- kernel/locking/rtmutex.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index c8fe74ef8db9..3c1ba7b9a326 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -656,6 +656,31 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, if (next_lock != waiter->lock) goto out_unlock_pi; + /* + * There could be 'spurious' loops in the lock graph due to ww_mutex, + * consider: + * + * P1: A, ww_A, ww_B + * P2: ww_B, ww_A + * P3: A + * + * P3 should not return -EDEADLK because it gets trapped in the cycle + * created by P1 and P2 (which will resolve -- and runs into + * max_lock_depth above). Therefore disable detect_deadlock such that + * the below termination condition can trigger once all relevant tasks + * are boosted. + * + * Even when we start with ww_mutex we can disable deadlock detection, + * since we would supress a ww_mutex induced deadlock at [6] anyway. + * Supressing it here however is not sufficient since we might still + * hit [6] due to adjustment driven iteration. + * + * NOTE: if someone were to create a deadlock between 2 ww_classes we'd + * utterly fail to report it; lockdep should. + */ + if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) + detect_deadlock = false; + /* * Drop out, when the task has no waiters. Note, * top_waiter can be NULL, when we are in the deboosting -- cgit v1.2.3 From a055fcc132d4c25b96d1115aea514258810dc6fc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 26 Aug 2021 10:48:18 +0200 Subject: locking/rtmutex: Return success on deadlock for ww_mutex waiters ww_mutexes can legitimately cause a deadlock situation in the lock graph which is resolved afterwards by the wait/wound mechanics. The rtmutex chain walk can detect such a deadlock and returns EDEADLK which in turn skips the wait/wound mechanism and returns EDEADLK to the caller. That's wrong because both lock chains might get EDEADLK or the wrong waiter would back out. Detect that situation and return 'success' in case that the waiter which initiated the chain walk is a ww_mutex with context. This allows the wait/wound mechanics to resolve the situation according to the rules. [ tglx: Split it apart and added changelog ] Reported-by: Sebastian Siewior Fixes: add461325ec5 ("locking/rtmutex: Extend the rtmutex core to support ww_mutex") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/YSeWjCHoK4v5OcOt@hirez.programming.kicks-ass.net --- kernel/locking/rtmutex.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 3c1ba7b9a326..8eabdc79602b 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -742,8 +742,21 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { - raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; + + /* + * When the deadlock is due to ww_mutex; also see above. 
Don't + * report the deadlock and instead let the ww_mutex wound/die + * logic pick which of the contending threads gets -EDEADLK. + * + * NOTE: assumes the cycle only contains a single ww_class; any + * other configuration and we fail to report; also, see + * lockdep. + */ + if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter->ww_ctx) + ret = 0; + + raw_spin_unlock(&lock->wait_lock); goto out_unlock_pi; } -- cgit v1.2.3