author | John Stultz <jstultz@google.com> | 2024-12-16 20:07:35 -0800
committer | Peter Zijlstra <peterz@infradead.org> | 2024-12-20 15:31:21 +0100
commit | abfdccd6af2b071951633e57d6322c46a1ea791f (patch)
tree | d852a162cbe01984b07795b714d31e39b6e85450 /kernel
parent | c2db11a750fb626d0d04f2dc76e548a1f07617df (diff)
sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
A common pattern seen when wake_qs are used to defer a wakeup
until after a lock is released is something like:
preempt_disable();
raw_spin_unlock(lock);
wake_up_q(wake_q);
preempt_enable();
So create some raw_spin_unlock*_wake() helper functions to clean
this up.
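The helpers themselves are added in include/linux/sched/wake_q.h,
outside the kernel/ subtree shown in the diffstat below. As a
sketch (following the pattern above; the NULL check and re-init
mirror the call sites being replaced), the irq variant looks
roughly like:

    static inline void raw_spin_unlock_irq_wake(raw_spinlock_t *lock,
                                                struct wake_q_head *wake_q)
    {
            preempt_disable();
            raw_spin_unlock_irq(lock);
            if (wake_q) {
                    wake_up_q(wake_q);   /* wakeups run with the lock dropped */
                    wake_q_init(wake_q); /* reset so the head can be reused */
            }
            preempt_enable();
    }

Keeping preemption disabled across wake_up_q() prevents a freshly
woken task from preempting the waker on this CPU before the wakeup
loop completes.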
Applies on top of the fix I submitted here:
https://lore.kernel.org/lkml/20241212222138.2400498-1-jstultz@google.com/
NOTE: I recognise the unlock()/unlock_irq()/unlock_irqrestore()
variants create some duplication of their own, and we could use a
macro to generate the similar functions (a sketch follows this
note), but I often dislike how such generation macros make finding
the actual implementation harder, so I left the three functions as
is. If folks would prefer otherwise, let me know and I'll switch it.
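For reference, the macro-generation alternative could look roughly
like this (hypothetical, not part of this patch); it also shows why
the approach is awkward here, since the irqrestore variant takes an
extra flags argument and would need special-casing regardless:

    #define DEFINE_RAW_SPIN_UNLOCK_WAKE(name, unlock_stmt)          \
    static inline void name(raw_spinlock_t *lock,                   \
                            struct wake_q_head *wake_q)             \
    {                                                               \
            preempt_disable();                                      \
            unlock_stmt;                                            \
            if (wake_q) {                                           \
                    wake_up_q(wake_q);                              \
                    wake_q_init(wake_q);                            \
            }                                                       \
            preempt_enable();                                       \
    }

    /* the irqrestore variant would still be open-coded for 'flags' */
    DEFINE_RAW_SPIN_UNLOCK_WAKE(raw_spin_unlock_wake, raw_spin_unlock(lock))
    DEFINE_RAW_SPIN_UNLOCK_WAKE(raw_spin_unlock_irq_wake, raw_spin_unlock_irq(lock))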
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20241217040803.243420-1-jstultz@google.com
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/futex/pi.c | 5
-rw-r--r-- | kernel/locking/mutex.c | 16
-rw-r--r-- | kernel/locking/rtmutex.c | 32
3 files changed, 10 insertions, 43 deletions
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
index d62cca5ed8f4..daea650b16f5 100644
--- a/kernel/futex/pi.c
+++ b/kernel/futex/pi.c
@@ -1020,10 +1020,7 @@ retry_private:
 	 * it sees the futex_q::pi_state.
 	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);
 
 	if (ret) {
 		if (ret == 1)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 3302e52f0c96..b36f23de48f1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -657,10 +657,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			goto err;
 		}
 
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-		/* Make sure we do wakeups before calling schedule */
-		wake_up_q(&wake_q);
-		wake_q_init(&wake_q);
+		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 
 		schedule_preempt_disabled();
 
@@ -710,8 +707,7 @@ skip_wait:
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	preempt_enable();
 	return 0;
 
@@ -720,10 +716,9 @@ err:
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	trace_contention_end(lock, ret);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
-	wake_up_q(&wake_q);
 	preempt_enable();
 	return ret;
 }
@@ -935,10 +930,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 697a56d3d949..4a8df1800cbb 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1292,13 +1292,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);
 
-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
-	wake_up_q(wake_q);
-	wake_q_init(wake_q);
-	preempt_enable();
-
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1642,13 +1636,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		if (wake_q) {
-			wake_up_q(wake_q);
-			wake_q_init(wake_q);
-		}
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 			rt_mutex_schedule();
@@ -1799,10 +1787,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	rt_mutex_post_schedule();
 
 	return ret;
@@ -1860,11 +1845,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		wake_up_q(wake_q);
-		wake_q_init(wake_q);
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
 			schedule_rtlock();
@@ -1893,10 +1874,7 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	rtlock_slowlock_locked(lock, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #endif /* RT_MUTEX_BUILD_SPINLOCKS */
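For context, the caller-side shape the helpers enable (illustrative
sketch; 'task' stands for whichever waiter the locking code selected
while holding wait_lock):

    #include <linux/sched/wake_q.h>

    DEFINE_WAKE_Q(wake_q);          /* empty wake queue on the stack */

    raw_spin_lock_irq(&lock->wait_lock);
    /* ... pick the waiter to wake while the lock is held ... */
    wake_q_add(&wake_q, task);      /* defer the wakeup; no sleep under the raw lock */
    raw_spin_unlock_irq_wake(&lock->wait_lock, &wake_q);
    /* wakeups have now run after the unlock, under disabled preemption */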