author		Peter Zijlstra <peterz@infradead.org>	2024-10-09 16:53:34 -0700
committer	Peter Zijlstra <peterz@infradead.org>	2024-10-14 12:52:40 +0200
commit		894d1b3db41cf7e6ae0304429a1747b3c3f390bc (patch)
tree		3bcda09ee457a5edb2df9b51bbdfa04377394150 /kernel/locking/spinlock_rt.c
parent		7e019dcc470f27066c98697e43d930df8d54bd9c (diff)
locking/mutex: Remove wakeups from under mutex::wait_lock
In preparation to nest mutex::wait_lock under rq::lock we need to remove
wakeups from under it. Do this by utilizing wake_qs to defer the wakeup
until after the lock is dropped.

[Heavily changed after 55f036ca7e74 ("locking: WW mutex cleanup") and
 08295b3b5bee ("locking: Implement an algorithm choice for Wound-Wait
 mutexes")]
[jstultz: rebased to mainline, added extra wake_up_q & init to avoid hangs,
 similar to Connor's rework of this patch]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Metin Kaya <metin.kaya@arm.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: Metin Kaya <metin.kaya@arm.com>
Link: https://lore.kernel.org/r/20241009235352.1614323-2-jstultz@google.com
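For context, the wake_q deferral pattern the message describes looks roughly
like the sketch below ('my_lock' and 'my_unlock_slowpath' are hypothetical
names for illustration; DEFINE_WAKE_Q(), wake_q_add() and wake_up_q() are the
kernel's wake-queue helpers from <linux/sched/wake_q.h>):

#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

/* Hypothetical lock type, for illustration only. */
struct my_lock {
	raw_spinlock_t wait_lock;
};

static void my_unlock_slowpath(struct my_lock *lock, struct task_struct *waiter)
{
	DEFINE_WAKE_Q(wake_q);		/* empty on-stack wake queue */

	raw_spin_lock(&lock->wait_lock);
	/* ... hand the lock to 'waiter' while protected by wait_lock ... */
	wake_q_add(&wake_q, waiter);	/* queue the wakeup, do not issue it yet */
	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);		/* deferred wakeup; wait_lock no longer held */
}

Since a wakeup takes rq::lock, issuing it while still holding wait_lock would
invert the rq::lock -> mutex::wait_lock nesting this series is preparing for;
deferring it via the wake_q avoids that.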
Diffstat (limited to 'kernel/locking/spinlock_rt.c')
-rw-r--r--	kernel/locking/spinlock_rt.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index 38e292454fcc..014143934e00 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -162,9 +162,10 @@ rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
 }
 
 static __always_inline int
-rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
+rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state,
+			       struct wake_q_head *wake_q)
 {
-	rtlock_slowlock_locked(rtm);
+	rtlock_slowlock_locked(rtm, wake_q);
 	return 0;
 }