Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-15 11:57:46 +00:00)
Merge tag 'locking-urgent-2024-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Ingo Molnar:

 "Fix missed rtmutex wakeups causing sporadic boot hangs and other
  misbehavior"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'locking-urgent-2024-12-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rtmutex: Make sure we wake anything on the wake_q when we release the lock->wait_lock
This commit is contained in: commit bf7a281b80
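For context, the fix in the diff below hinges on the kernel's wake_q mechanism: wakeups are queued on a wake_q while lock->wait_lock is held and are flushed only after the lock is dropped, so that nothing left on the queue is forgotten before the PI chain walk or before the task goes to sleep. The following is a minimal illustrative sketch of that pattern, not code from this commit; the names example_wait_lock, example_release and waiter_task are hypothetical, while DEFINE_WAKE_Q, wake_q_add, wake_up_q and wake_q_init are the real kernel APIs the patch uses.

/*
 * Illustrative sketch of the wake_q deferral pattern (hypothetical names).
 */
#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_wait_lock);

static void example_release(struct task_struct *waiter_task)
{
	DEFINE_WAKE_Q(wake_q);			/* empty on-stack wake queue */

	raw_spin_lock_irq(&example_wait_lock);
	/* Defer the wakeup instead of waking the task under the lock. */
	wake_q_add(&wake_q, waiter_task);

	/*
	 * Disable preemption so a woken, higher-priority task cannot preempt
	 * us between dropping the lock and flushing the queue.
	 */
	preempt_disable();
	raw_spin_unlock_irq(&example_wait_lock);
	wake_up_q(&wake_q);		/* actually wake the deferred task(s) */
	wake_q_init(&wake_q);		/* reset, in case the queue is reused */
	preempt_enable();
}

The design point the patch enforces is the same as in this sketch: deferred wakeups must be flushed every time lock->wait_lock is released, otherwise a task queued on the wake_q can be left sleeping indefinitely, which is what caused the reported boot hangs.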
kernel/locking/rtmutex.c
@@ -1292,7 +1292,13 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);
 
+	preempt_disable();
 	raw_spin_unlock_irq(&lock->wait_lock);
+	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
+	wake_up_q(wake_q);
+	wake_q_init(wake_q);
+	preempt_enable();
+
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1596,6 +1602,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *			 or TASK_UNINTERRUPTIBLE)
  * @timeout:		 the pre-initialized and started timer, or NULL for none
  * @waiter:		 the pre-initialized rt_mutex_waiter
+ * @wake_q:		 wake_q of tasks to wake when we drop the lock->wait_lock
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
@@ -1603,7 +1610,8 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 					   struct ww_acquire_ctx *ww_ctx,
 					   unsigned int state,
 					   struct hrtimer_sleeper *timeout,
-					   struct rt_mutex_waiter *waiter)
+					   struct rt_mutex_waiter *waiter,
+					   struct wake_q_head *wake_q)
 	__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
 {
 	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
@@ -1634,7 +1642,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
+		preempt_disable();
 		raw_spin_unlock_irq(&lock->wait_lock);
+		if (wake_q) {
+			wake_up_q(wake_q);
+			wake_q_init(wake_q);
+		}
+		preempt_enable();
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 			rt_mutex_schedule();
@@ -1708,7 +1722,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 
 	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
 	if (likely(!ret))
-		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
+		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q);
 
 	if (likely(!ret)) {
 		/* acquired the lock */

kernel/locking/rtmutex_api.c
@@ -383,7 +383,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter, NULL);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.