sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
A common pattern seen when wake_qs are used to defer a wakeup until after a lock is released is something like:

  preempt_disable();
  raw_spin_unlock(lock);
  wake_up_q(wake_q);
  preempt_enable();

So create some raw_spin_unlock*_wake() helper functions to clean this up.

Applies on top of the fix I submitted here:
https://lore.kernel.org/lkml/20241212222138.2400498-1-jstultz@google.com/

NOTE: I recognise the unlock()/unlock_irq()/unlock_irqrestore() variants create some duplication of their own, which we could avoid by using a macro to generate the similar functions, but I often dislike how those generation macros make finding the actual implementation harder, so I left the three functions as they are. If folks would prefer otherwise, let me know and I'll switch it.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20241217040803.243420-1-jstultz@google.com
commit abfdccd6af
parent c2db11a750
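The conversion at each call site is mechanical. As a minimal sketch (with placeholder `lock` and `wake_q` variables rather than any specific caller), the open-coded pattern collapses to a single call:

  /* Before: open-coded at every call site */
  preempt_disable();
  raw_spin_unlock(lock);
  wake_up_q(wake_q);
  preempt_enable();

  /* After: the helper unlocks, wakes, and re-initializes the queue */
  raw_spin_unlock_wake(lock, wake_q);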
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irq(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+				     struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irqrestore(lock, flags);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
 #endif /* _LINUX_SCHED_WAKE_Q_H */
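The helpers rely on guard(preempt)() from <linux/cleanup.h>, which keeps preemption disabled from the unlock until the helper returns, covering the wake_up_q() call. As a rough behavioural sketch (an illustration of what the guard achieves, not the actual macro expansion), the first helper acts like:

  /* Behavioural equivalent of raw_spin_unlock_wake(); illustration only */
  void raw_spin_unlock_wake_sketch(raw_spinlock_t *lock, struct wake_q_head *wake_q)
  {
  	preempt_disable();		/* guard(preempt)() on scope entry */
  	raw_spin_unlock(lock);
  	if (wake_q) {
  		wake_up_q(wake_q);
  		wake_q_init(wake_q);	/* queue is reusable afterwards */
  	}
  	preempt_enable();		/* the guard runs this at scope exit */
  }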
--- a/kernel/futex/pi.c
+++ b/kernel/futex/pi.c
@@ -1020,10 +1020,7 @@ retry_private:
 	 * it sees the futex_q::pi_state.
 	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);
 
 	if (ret) {
 		if (ret == 1)
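Call sites like the one above populate the wake_q while holding the lock, then hand it to the helper on release. End to end, a hypothetical user (struct foo and foo_release() are made-up names for illustration, not code from this patch) might look like:

  struct foo {
  	raw_spinlock_t lock;
  	/* ... protected state ... */
  };

  static void foo_release(struct foo *f, struct task_struct *waiter)
  {
  	DEFINE_WAKE_Q(wake_q);			/* empty on-stack wake queue */

  	raw_spin_lock(&f->lock);
  	wake_q_add(&wake_q, waiter);		/* defer the wakeup past the unlock */
  	raw_spin_unlock_wake(&f->lock, &wake_q);
  }

Because the helper re-initializes the wake_q after waking, the queue can safely be reused if the caller loops.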
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -657,10 +657,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 			goto err;
 		}
 
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-		/* Make sure we do wakeups before calling schedule */
-		wake_up_q(&wake_q);
-		wake_q_init(&wake_q);
+		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 
 		schedule_preempt_disabled();
 
@@ -710,8 +707,7 @@ skip_wait:
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	preempt_enable();
 	return 0;
 
@@ -720,10 +716,9 @@ err:
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	trace_contention_end(lock, ret);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
-	wake_up_q(&wake_q);
 	preempt_enable();
 	return ret;
 }
@@ -935,10 +930,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1292,13 +1292,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);
 
-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
-	wake_up_q(wake_q);
-	wake_q_init(wake_q);
-	preempt_enable();
-
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1642,13 +1636,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 		owner = rt_mutex_owner(lock);
 	else
 		owner = NULL;
-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	if (wake_q) {
-		wake_up_q(wake_q);
-		wake_q_init(wake_q);
-	}
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 	if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 		rt_mutex_schedule();
@@ -1799,10 +1787,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	rt_mutex_post_schedule();
 
 	return ret;
@@ -1860,11 +1845,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 		owner = rt_mutex_owner(lock);
 	else
 		owner = NULL;
-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	wake_up_q(wake_q);
-	wake_q_init(wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 	if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
 		schedule_rtlock();
@@ -1893,10 +1874,7 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	rtlock_slowlock_locked(lock, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #endif /* RT_MUTEX_BUILD_SPINLOCKS */