From 9a4b99fce659c03699f1cb5003ebe7c57c084d49 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Fri, 26 Feb 2021 09:50:26 -0800 Subject: kernel/futex: Kill rt_mutex_next_owner() Update wake_futex_pi() and kill the call altogether. This is possible because: (i) The case of fixup_owner() in which the pi_mutex was stolen from the signaled enqueued top-waiter which fails to trylock and doesn't see a current owner of the rtmutex but needs to acknowledge an non-enqueued higher priority waiter, which is the other alternative. This used to be handled by rt_mutex_next_owner(), which guaranteed fixup_pi_state_owner('newowner') never to be nil. Nowadays the logic is handled by an EAGAIN loop, without the need of rt_mutex_next_owner(). Specifically: c1e2f0eaf015 (futex: Avoid violating the 10th rule of futex) 9f5d1c336a10 (futex: Handle transient "ownerless" rtmutex state correctly) (ii) rt_mutex_next_owner() and rt_mutex_top_waiter() are semantically equivalent, as of: c28d62cf52d7 (locking/rtmutex: Handle non enqueued waiters gracefully in remove_waiter()) So instead of keeping the call around, just use the good ole rt_mutex_top_waiter(). No change in semantics. Signed-off-by: Davidlohr Bueso Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210226175029.50335-1-dave@stgolabs.net --- kernel/locking/rtmutex.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 48fff6437901..29f09d0b8224 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1792,26 +1792,6 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, return ret; } -/** - * rt_mutex_next_owner - return the next owner of the lock - * - * @lock: the rt lock query - * - * Returns the next owner of the lock or NULL - * - * Caller has to serialize against other accessors to the lock - * itself. - * - * Special API call for PI-futex support - */ -struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) -{ - if (!rt_mutex_has_waiters(lock)) - return NULL; - - return rt_mutex_top_waiter(lock)->task; -} - /** * rt_mutex_wait_proxy_lock() - Wait for lock acquisition * @lock: the rt_mutex we were woken on -- cgit v1.2.3 From e2db7592be8e83df47519116621411e1056b21c7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 22 Mar 2021 02:35:05 +0100 Subject: locking: Fix typos in comments Fix ~16 single-word typos in locking code comments. Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Paul E. McKenney Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/locking/rtmutex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 29f09d0b8224..db31bce114f8 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -706,7 +706,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, } else if (prerequeue_top_waiter == waiter) { /* * The waiter was the top waiter on the lock, but is - * no longer the top prority waiter. Replace waiter in + * no longer the top priority waiter. Replace waiter in * the owner tasks pi waiters tree with the new top * (highest priority) waiter and adjust the priority * of the owner. @@ -1194,7 +1194,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, return; /* - * Yell lowdly and stop the task right here. + * Yell loudly and stop the task right here. 
*/ rt_mutex_print_deadlock(w); while (1) { -- cgit v1.2.3 From c15380b72d7ae821ee090ba5a56fc6310828dbda Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:30 +0100 Subject: locking/rtmutex: Remove rt_mutex_timed_lock() rt_mutex_timed_lock() has no callers since: c051b21f71d1f ("rtmutex: Confine deadlock logic to futex") Remove it. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.061103415@linutronix.de --- kernel/locking/rtmutex.c | 46 ---------------------------------------------- 1 file changed, 46 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index db31bce114f8..ca93e5d7b026 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1394,21 +1394,6 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); } -static inline int -rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) -{ - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - - return slowfn(lock, state, timeout, chwalk); -} - static inline int rt_mutex_fasttrylock(struct rt_mutex *lock, int (*slowfn)(struct rt_mutex *lock)) @@ -1516,37 +1501,6 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) return __rt_mutex_slowtrylock(lock); } -/** - * rt_mutex_timed_lock - lock a rt_mutex interruptible - * the timeout structure is provided - * by the caller - * - * @lock: the rt_mutex to be locked - * @timeout: timeout structure or NULL (no timeout) - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - * -ETIMEDOUT when the timeout expired - */ -int -rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) -{ - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_MIN_CHAINWALK, - rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - - return ret; -} -EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); - /** * rt_mutex_trylock - try to lock a rt_mutex * -- cgit v1.2.3 From 2d445c3e4a8216cfa9703998124c13250cc13e5e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:31 +0100 Subject: locking/rtmutex: Remove rtmutex deadlock tester leftovers The following debug members of 'struct rtmutex' are unused: - save_state: No users - file,line: Printed if ::name is NULL. This is only used for non-futex locks so ::name is never NULL - magic: Assigned to NULL by rt_mutex_destroy(), no further usage Remove them along with unused inline and macro leftovers related to the long gone deadlock tester. 
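Since the diffstat above is limited to kernel/locking/rtmutex.c, the header side of this change is not visible here. For context only, a rough sketch of where those debug members lived, paraphrased from include/linux/rtmutex.h of that era (the layout and types are a best-effort reconstruction, not taken from this patch):

struct rt_mutex {
	raw_spinlock_t		wait_lock;
	struct rb_root_cached	waiters;
	struct task_struct	*owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	int			save_state;	/* no users at all */
	const char		*name, *file;	/* ::file only printed when ::name is
						 * NULL, which never happens for
						 * non-futex locks */
	int			line;		/* goes away together with ::file */
	void			*magic;		/* only ever set to NULL by
						 * rt_mutex_destroy() */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

Of the debug members, only ::name survives, because lockdep and the init path still use it.
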
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.195064296@linutronix.de --- kernel/locking/rtmutex.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index ca93e5d7b026..11abc60d1478 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1594,9 +1594,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) void rt_mutex_destroy(struct rt_mutex *lock) { WARN_ON(rt_mutex_is_locked(lock)); -#ifdef CONFIG_DEBUG_RT_MUTEXES - lock->magic = NULL; -#endif } EXPORT_SYMBOL_GPL(rt_mutex_destroy); -- cgit v1.2.3 From 6d41c675a5394057f6fb1dc97cc0a0e360f2c2f8 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:32 +0100 Subject: locking/rtmutex: Remove output from deadlock detector The rtmutex specific deadlock detector predates lockdep coverage of rtmutex and since commit f5694788ad8da ("rt_mutex: Add lockdep annotations") it contains a lot of redundant functionality: - lockdep will detect an potential deadlock before rtmutex-debug has a chance to do so - the deadlock debugging is restricted to rtmutexes which are not associated to futexes and have an active waiter, which is covered by lockdep already Remove the redundant functionality and move actual deadlock WARN() into the deadlock code path. The latter needs a seperate cleanup. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.320398604@linutronix.de --- kernel/locking/rtmutex.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 11abc60d1478..4beca549aeeb 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -579,7 +579,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { - debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; goto out_unlock_pi; @@ -1171,8 +1170,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, raw_spin_unlock_irq(&lock->wait_lock); - debug_rt_mutex_print_deadlock(waiter); - schedule(); raw_spin_lock_irq(&lock->wait_lock); @@ -1196,7 +1193,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Yell loudly and stop the task right here. */ - rt_mutex_print_deadlock(w); + WARN(1, "rtmutex deadlock detected\n"); while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); @@ -1704,8 +1701,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } - debug_rt_mutex_print_deadlock(waiter); - return ret; } -- cgit v1.2.3 From 8188d74e68174b11ff7c4a635ffc8fd31eacc6b9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:34 +0100 Subject: locking/rtmutex: Remove empty and unused debug stubs No users or useless and therefore just ballast. 
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.549192485@linutronix.de --- kernel/locking/rtmutex.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 4beca549aeeb..96c7c537eab4 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -885,9 +885,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock(&task->pi_lock); takeit: - /* We got the lock. */ - debug_rt_mutex_lock(lock); - /* * This either preserves the RT_MUTEX_HAS_WAITERS bit if there * are still waiters or clears it. @@ -1580,20 +1577,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) rt_mutex_postunlock(&wake_q); } -/** - * rt_mutex_destroy - mark a mutex unusable - * @lock: the mutex to be destroyed - * - * This function marks the mutex uninitialized, and any subsequent - * use of the mutex is forbidden. The mutex must not be locked when - * this function is called. - */ -void rt_mutex_destroy(struct rt_mutex *lock) -{ - WARN_ON(rt_mutex_is_locked(lock)); -} -EXPORT_SYMBOL_GPL(rt_mutex_destroy); - /** * __rt_mutex_init - initialize the rt_mutex * @@ -1635,7 +1618,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); - debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } -- cgit v1.2.3 From fae37feee096bd3c85f6453713131a471404c6f5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:35 +0100 Subject: locking/rtmutex: Move rt_mutex_debug_task_free() to rtmutex.c Prepare for removing the header maze. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.646359691@linutronix.de --- kernel/locking/rtmutex.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 96c7c537eab4..8a934db4bb1a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1813,3 +1813,11 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, return cleanup; } + +#ifdef CONFIG_DEBUG_RT_MUTEXES +void rt_mutex_debug_task_free(struct task_struct *task) +{ + DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); + DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); +} +#endif -- cgit v1.2.3 From f7efc4799f8114ba85b68584f83293f435009de4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:36 +0100 Subject: locking/rtmutex: Inline chainwalk depth check There is no point for this wrapper at all. 
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.754254046@linutronix.de --- kernel/locking/rtmutex.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 8a934db4bb1a..2c77e729b820 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -343,14 +343,9 @@ static void rt_mutex_adjust_prio(struct task_struct *p) static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk chwalk) { - /* - * This is just a wrapper function for the following call, - * because debug_rt_mutex_detect_deadlock() smells like a magic - * debug feature and I wanted to keep the cond function in the - * main source file along with the comments instead of having - * two of the same in the headers. - */ - return debug_rt_mutex_detect_deadlock(waiter, chwalk); + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX)) + return waiter != NULL; + return chwalk == RT_MUTEX_FULL_CHAINWALK; } /* -- cgit v1.2.3 From f5a98866e506a816f6a855df1e7ed41e1891ec66 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:38 +0100 Subject: locking/rtmutex: Decrapify __rt_mutex_init() The conditional debug handling is just another layer of obfuscation. Split the function so rt_mutex_init_proxy_locked() can invoke the inner init and __rt_mutex_init() gets the full treatment. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.955697588@linutronix.de --- kernel/locking/rtmutex.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2c77e729b820..c9c2ab50c1d5 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1586,12 +1586,10 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) { - lock->owner = NULL; - raw_spin_lock_init(&lock->wait_lock); - lock->waiters = RB_ROOT_CACHED; + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); - if (name && key) - debug_rt_mutex_init(lock, name, key); + __rt_mutex_basic_init(lock); } EXPORT_SYMBOL_GPL(__rt_mutex_init); @@ -1612,7 +1610,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - __rt_mutex_init(lock, NULL, NULL); + __rt_mutex_basic_init(lock); rt_mutex_set_owner(lock, proxy_owner); } -- cgit v1.2.3 From d7a2edb890c0bfe467140c0cd79fe7cf65249376 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:40 +0100 Subject: locking/rtmutex: Make text section and inlining consistent rtmutex is half __sched and the other half is not. If the compiler decides to not inline larger static functions then part of the code ends up in the regular text section. There are also quite some performance related small helpers which are either static or plain inline. Force inline those which make sense and mark the rest __sched. 
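As an aside on the two annotations used throughout this patch: __sched and __always_inline pull in opposite directions. A minimal sketch of what they roughly expand to and why the split matters, paraphrased from include/linux/sched/debug.h and include/linux/compiler_types.h (not part of the patch; the helper names below are made up for illustration):

/*
 * Roughly:
 *	#define __sched		__section(".sched.text")
 *	#define __always_inline	inline __attribute__((__always_inline__))
 *
 * Functions placed in .sched.text are skipped by in_sched_functions(),
 * so /proc/<pid>/wchan and stack traces report the caller that blocked
 * rather than the locking internals. Forcing the small helpers inline
 * keeps them from being emitted out of line into the regular text
 * section when the compiler decides not to inline them on its own.
 */
static __always_inline bool example_fastpath(struct rt_mutex *lock)	/* hypothetical */
{
	return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}

static int __sched example_slowpath(struct rt_mutex *lock)		/* hypothetical */
{
	return rt_mutex_slowlock(lock, TASK_UNINTERRUPTIBLE, NULL,
				 RT_MUTEX_MIN_CHAINWALK);
}
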
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153944.152977820@linutronix.de --- kernel/locking/rtmutex.c | 152 +++++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 76 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index c9c2ab50c1d5..36128211f589 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -49,7 +49,7 @@ * set this bit before looking at the lock. */ -static void +static __always_inline void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) { unsigned long val = (unsigned long)owner; @@ -60,13 +60,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) WRITE_ONCE(lock->owner, (struct task_struct *)val); } -static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); } -static void fixup_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -149,7 +149,7 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) * all future threads that attempt to [Rmw] the lock to the slowpath. As such * relaxed semantics suffice. */ -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -165,8 +165,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) * 2) Drop lock->wait_lock * 3) Try to unlock the lock with cmpxchg */ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { struct task_struct *owner = rt_mutex_owner(lock); @@ -204,7 +204,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, # define rt_mutex_cmpxchg_acquire(l,c,n) (0) # define rt_mutex_cmpxchg_release(l,c,n) (0) -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); @@ -213,8 +213,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) /* * Simple slow path only version: lock->owner is protected by lock->wait_lock. 
*/ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { lock->owner = NULL; @@ -229,9 +229,8 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, #define task_to_waiter(p) \ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } -static inline int -rt_mutex_waiter_less(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio < right->prio) return 1; @@ -248,9 +247,8 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, return 0; } -static inline int -rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio != right->prio) return 0; @@ -270,18 +268,18 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, #define __node_2_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, tree_entry) -static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); } -static void +static __always_inline void rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->tree_entry)) @@ -294,18 +292,19 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) #define __node_2_pi_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, pi_tree_entry) -static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool +__pi_waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); } -static void +static __always_inline void rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) @@ -315,7 +314,7 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) RB_CLEAR_NODE(&waiter->pi_tree_entry); } -static void rt_mutex_adjust_prio(struct task_struct *p) +static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) { struct task_struct *pi_task = NULL; @@ -340,8 +339,9 @@ static void rt_mutex_adjust_prio(struct task_struct *p) * deadlock detection is disabled independent of the detect argument * and the config settings. 
*/ -static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, - enum rtmutex_chainwalk chwalk) +static __always_inline bool +rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, + enum rtmutex_chainwalk chwalk) { if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX)) return waiter != NULL; @@ -353,7 +353,7 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, */ int max_lock_depth = 1024; -static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) +static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; } @@ -421,12 +421,12 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) * unlock(lock->wait_lock); release [L] * goto again; */ -static int rt_mutex_adjust_prio_chain(struct task_struct *task, - enum rtmutex_chainwalk chwalk, - struct rt_mutex *orig_lock, - struct rt_mutex *next_lock, - struct rt_mutex_waiter *orig_waiter, - struct task_struct *top_task) +static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, + enum rtmutex_chainwalk chwalk, + struct rt_mutex *orig_lock, + struct rt_mutex *next_lock, + struct rt_mutex_waiter *orig_waiter, + struct task_struct *top_task) { struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; struct rt_mutex_waiter *prerequeue_top_waiter; @@ -778,8 +778,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL */ -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) +static int __sched +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) { lockdep_assert_held(&lock->wait_lock); @@ -896,10 +897,10 @@ takeit: * * This must be called with lock->wait_lock held and interrupts disabled */ -static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task, - enum rtmutex_chainwalk chwalk) +static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task, + enum rtmutex_chainwalk chwalk) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; @@ -985,8 +986,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * * Called with lock->wait_lock held and interrupts disabled. */ -static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, - struct rt_mutex *lock) +static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, + struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; @@ -1035,8 +1036,8 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Must be called with lock->wait_lock held and interrupts disabled. I must * have just failed to try_to_take_rt_mutex(). 
*/ -static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) +static void __sched remove_waiter(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); @@ -1093,7 +1094,7 @@ static void remove_waiter(struct rt_mutex *lock, * * Called from sched_setscheduler */ -void rt_mutex_adjust_pi(struct task_struct *task) +void __sched rt_mutex_adjust_pi(struct task_struct *task) { struct rt_mutex_waiter *waiter; struct rt_mutex *next_lock; @@ -1116,7 +1117,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) next_lock, NULL, task); } -void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) { debug_rt_mutex_init_waiter(waiter); RB_CLEAR_NODE(&waiter->pi_tree_entry); @@ -1134,10 +1135,9 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) * * Must be called with lock->wait_lock held and interrupts disabled */ -static int __sched -__rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) +static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + struct rt_mutex_waiter *waiter) { int ret = 0; @@ -1172,8 +1172,8 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static void rt_mutex_handle_deadlock(int res, int detect_deadlock, - struct rt_mutex_waiter *w) +static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, + struct rt_mutex_waiter *w) { /* * If the result is not -EDEADLOCK or the caller requested @@ -1195,10 +1195,9 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Slow path lock function: */ -static int __sched -rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk) +static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk) { struct rt_mutex_waiter waiter; unsigned long flags; @@ -1257,7 +1256,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = try_to_take_rt_mutex(lock, current, NULL); @@ -1273,7 +1272,7 @@ static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) /* * Slow path try-lock function: */ -static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) { unsigned long flags; int ret; @@ -1371,7 +1370,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * The atomic acquire/release ops are compiled away, when either the * architecture does not support cmpxchg or when debugging is enabled. */ -static inline int +static __always_inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, @@ -1383,7 +1382,7 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); } -static inline int +static __always_inline int rt_mutex_fasttrylock(struct rt_mutex *lock, int (*slowfn)(struct rt_mutex *lock)) { @@ -1396,7 +1395,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, /* * Performs the wakeup of the top-waiter and re-enables preemption. 
*/ -void rt_mutex_postunlock(struct wake_q_head *wake_q) +void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) { wake_up_q(wake_q); @@ -1404,7 +1403,7 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q) preempt_enable(); } -static inline void +static __always_inline void rt_mutex_fastunlock(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock, struct wake_q_head *wqh)) @@ -1418,7 +1417,8 @@ rt_mutex_fastunlock(struct rt_mutex *lock, rt_mutex_postunlock(&wake_q); } -static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) +static __always_inline void __rt_mutex_lock(struct rt_mutex *lock, + unsigned int subclass) { might_sleep(); @@ -1536,7 +1536,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock); * @wake_q: The wake queue head from which to get the next lock waiter */ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) + struct wake_q_head *wake_q) { lockdep_assert_held(&lock->wait_lock); @@ -1583,7 +1583,7 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) * * Initializing of a locked rt_mutex is not allowed */ -void __rt_mutex_init(struct rt_mutex *lock, const char *name, +void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) { debug_check_no_locks_freed((void *)lock, sizeof(*lock)); @@ -1607,8 +1607,8 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); * possible at this point because the pi_state which contains the rtmutex * is not yet visible to other tasks. */ -void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) +void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) { __rt_mutex_basic_init(lock); rt_mutex_set_owner(lock, proxy_owner); @@ -1626,7 +1626,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, * possible because it belongs to the pi_state which is about to be freed * and it is not longer visible to other tasks. */ -void rt_mutex_proxy_unlock(struct rt_mutex *lock) +void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock) { debug_rt_mutex_proxy_unlock(lock); rt_mutex_set_owner(lock, NULL); @@ -1651,9 +1651,9 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) * * Special API call for PI-futex support. */ -int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) +int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) { int ret; @@ -1698,9 +1698,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, * * Special API call for PI-futex support. 
*/ -int rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) +int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) { int ret; @@ -1730,9 +1730,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, * * Special API call for PI-futex support */ -int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter) +int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *to, + struct rt_mutex_waiter *waiter) { int ret; @@ -1770,8 +1770,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, * * Special API call for PI-futex support */ -bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) +bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) { bool cleanup = false; -- cgit v1.2.3 From 70c80103aafdeae99126694bc1cd54de016bc258 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:41 +0100 Subject: locking/rtmutex: Consolidate the fast/slowpath invocation The indirection via a function pointer (which is at least optimized into a tail call by the compiler) is making the code hard to read. Clean it up and move the futex related trylock functions down to the futex section. Move the wake_q wakeup into rt_mutex_slowunlock(). No point in handing it to the caller. The futex code uses a different function. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153944.247927548@linutronix.de --- kernel/locking/rtmutex.c | 144 +++++++++++++++++++---------------------------- 1 file changed, 59 insertions(+), 85 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 36128211f589..7d0c16871033 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1298,14 +1298,25 @@ static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) return ret; } +/* + * Performs the wakeup of the top-waiter and re-enables preemption. + */ +void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) +{ + wake_up_q(wake_q); + + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ + preempt_enable(); +} + /* * Slow path to release a rt-mutex. * * Return whether the current task needs to call rt_mutex_postunlock(). */ -static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) +static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { + DEFINE_WAKE_Q(wake_q); unsigned long flags; /* irqsave required to support early boot calls */ @@ -1347,7 +1358,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, while (!rt_mutex_has_waiters(lock)) { /* Drops lock->wait_lock ! */ if (unlock_rt_mutex_safe(lock, flags) == true) - return false; + return; /* Relock the rtmutex and try again */ raw_spin_lock_irqsave(&lock->wait_lock, flags); } @@ -1358,10 +1369,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. 
*/ - mark_wakeup_next_waiter(wake_q, lock); + mark_wakeup_next_waiter(&wake_q, lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - return true; /* call rt_mutex_postunlock() */ + rt_mutex_postunlock(&wake_q); } /* @@ -1370,60 +1381,21 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * The atomic acquire/release ops are compiled away, when either the * architecture does not support cmpxchg or when debugging is enabled. */ -static __always_inline int -rt_mutex_fastlock(struct rt_mutex *lock, int state, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) +static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state, + unsigned int subclass) { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; + int ret; - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); -} + might_sleep(); + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -static __always_inline int -rt_mutex_fasttrylock(struct rt_mutex *lock, - int (*slowfn)(struct rt_mutex *lock)) -{ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 1; - - return slowfn(lock); -} - -/* - * Performs the wakeup of the top-waiter and re-enables preemption. - */ -void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) -{ - wake_up_q(wake_q); - - /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ - preempt_enable(); -} - -static __always_inline void -rt_mutex_fastunlock(struct rt_mutex *lock, - bool (*slowfn)(struct rt_mutex *lock, - struct wake_q_head *wqh)) -{ - DEFINE_WAKE_Q(wake_q); - - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) - return; - - if (slowfn(lock, &wake_q)) - rt_mutex_postunlock(&wake_q); -} - -static __always_inline void __rt_mutex_lock(struct rt_mutex *lock, - unsigned int subclass) -{ - might_sleep(); + return 0; - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); + ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + if (ret) + mutex_release(&lock->dep_map, _RET_IP_); + return ret; } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -1435,7 +1407,7 @@ static __always_inline void __rt_mutex_lock(struct rt_mutex *lock, */ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) { - __rt_mutex_lock(lock, subclass); + __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass); } EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); @@ -1448,7 +1420,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); */ void __sched rt_mutex_lock(struct rt_mutex *lock) { - __rt_mutex_lock(lock, 0); + __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL_GPL(rt_mutex_lock); #endif @@ -1464,42 +1436,21 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - - return ret; + return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0); } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); -/* - * Futex variant, must not use fastpath. 
- */ -int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return rt_mutex_slowtrylock(lock); -} - -int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return __rt_mutex_slowtrylock(lock); -} - /** * rt_mutex_trylock - try to lock a rt_mutex * * @lock: the rt_mutex to be locked * - * This function can only be called in thread context. It's safe to - * call it from atomic regions, but not from hard interrupt or soft - * interrupt context. + * This function can only be called in thread context. It's safe to call it + * from atomic regions, but not from hard or soft interrupt context. * - * Returns 1 on success and 0 on contention + * Returns: + * 1 on success + * 0 on contention */ int __sched rt_mutex_trylock(struct rt_mutex *lock) { @@ -1508,7 +1459,14 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) return 0; - ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); + /* + * No lockdep annotation required because lockdep disables the fast + * path. + */ + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; + + ret = rt_mutex_slowtrylock(lock); if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); @@ -1524,10 +1482,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); void __sched rt_mutex_unlock(struct rt_mutex *lock) { mutex_release(&lock->dep_map, _RET_IP_); - rt_mutex_fastunlock(lock, rt_mutex_slowunlock); + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + + rt_mutex_slowunlock(lock); } EXPORT_SYMBOL_GPL(rt_mutex_unlock); +/* + * Futex variants, must not use fastpath. + */ +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return rt_mutex_slowtrylock(lock); +} + +int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return __rt_mutex_slowtrylock(lock); +} + /** * __rt_mutex_futex_unlock - Futex variant, that since futex variants * do not use the fast-path, can be simple and will not need to retry. -- cgit v1.2.3 From 82cd5b1039e26b1b1254886464991e34de439ac5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:42 +0100 Subject: locking/rtmutex: Fix misleading comment in rt_mutex_postunlock() Preemption is disabled in mark_wakeup_next_waiter(,) not in rt_mutex_slowunlock(). Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153944.341734608@linutronix.de --- kernel/locking/rtmutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 7d0c16871033..512b400bd961 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1305,7 +1305,7 @@ void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) { wake_up_q(wake_q); - /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ + /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ preempt_enable(); } -- cgit v1.2.3 From c2c360ed7f28fd6b7eb7e39e70af2d2ae405f466 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:43 +0100 Subject: locking/rtmutex: Restrict the trylock WARN_ON() to debug The warning as written is expensive and not really required for a production kernel. Make it depend on rt mutex debugging and use !in_task() for the condition which generates far better code and gives the same answer. 
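Why !in_task() generates better code: in_irq(), in_nmi() and in_serving_softirq() each read and mask preempt_count() separately, while in_task() folds all three context checks into a single masked test. For context, roughly as defined in include/linux/preempt.h (paraphrased, not part of this patch; exact macros may differ between versions):

/*
 * One read of preempt_count() and one combined mask, giving the same
 * answer as the three individual context checks it replaces.
 */
#define in_task()	(!(preempt_count() & \
			   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
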
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153944.436565064@linutronix.de --- kernel/locking/rtmutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 512b400bd961..c68542dbbd0a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1456,7 +1456,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock) { int ret; - if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task())) return 0; /* -- cgit v1.2.3 From a51a327f3bcdcb1a37ed9325ad07e1456cd4d426 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:44 +0100 Subject: locking/rtmutex: Clean up signal handling in __rt_mutex_slowlock() The signal handling in __rt_mutex_slowlock() is open coded. Use signal_pending_state() instead. Aside of the cleanup this also prepares for the RT lock substituions which require support for TASK_KILLABLE. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153944.533811987@linutronix.de --- kernel/locking/rtmutex.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'kernel/locking/rtmutex.c') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index c68542dbbd0a..406818196a9f 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1146,18 +1146,13 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; - /* - * TASK_INTERRUPTIBLE checks for signals and - * timeout. Ignored otherwise. - */ - if (likely(state == TASK_INTERRUPTIBLE)) { - /* Signal pending? */ - if (signal_pending(current)) - ret = -EINTR; - if (timeout && !timeout->task) - ret = -ETIMEDOUT; - if (ret) - break; + if (timeout && !timeout->task) { + ret = -ETIMEDOUT; + break; + } + if (signal_pending_state(state, current)) { + ret = -EINTR; + break; } raw_spin_unlock_irq(&lock->wait_lock); -- cgit v1.2.3
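For reference, the helper the last hunk switches to, roughly as it reads in include/linux/sched/signal.h (reproduced from memory, so treat it as a sketch rather than the authoritative source):

static inline int signal_pending_state(long state, struct task_struct *p)
{
	/* Only interruptible and killable sleeps can be woken by a signal. */
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	/* Killable sleeps only react to fatal signals. */
	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

Because TASK_KILLABLE is TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, the same check lets a fatal signal break out of a killable rtmutex sleep while ordinary signals are ignored, which is the "prepares for TASK_KILLABLE" part of the changelog.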