Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
powerpc/qspinlock: don't propagate the not-sleepy state
To simplify things, don't propagate the not-sleepy condition back down
the queue. Instead, have the waiters clear their own node->sleepy when
finding the lock owner is not preempted.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Reviewed-by: "Nysal Jan K.A" <nysal@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231016124305.139923-5-npiggin@gmail.com
This commit is contained in:

parent fd8fae50c9
commit fcf77d4427
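For orientation before the diff: previously a waiter that found the lock owner running had to push the not-sleepy state back down the queue; now each waiter simply clears its own node->sleepy. A compilable toy model of the two behaviours (struct qnode here is a stand-in, not the kernel's, and main() is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy queue node; the real struct qnode lives in arch/powerpc/lib/qspinlock.c. */
struct qnode {
	struct qnode *next;
	bool sleepy;
};

/* Old behaviour: a waiter seeing a running owner reached into its
 * successor's node to clear the propagated flag. */
static void old_clear(struct qnode *node)
{
	if (node->next && node->next->sleepy)
		node->next->sleepy = false;	/* store into another CPU's node */
}

/* New behaviour: each waiter clears only its own flag. */
static void new_clear(struct qnode *node)
{
	node->sleepy = false;			/* purely local store */
}

int main(void)
{
	struct qnode b = { .next = NULL, .sleepy = true };
	struct qnode a = { .next = &b, .sleepy = true };

	old_clear(&a);	/* clears b.sleepy */
	new_clear(&a);	/* clears a.sleepy */
	printf("a.sleepy=%d b.sleepy=%d\n", a.sleepy, b.sleepy);
	return 0;
}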
@@ -350,7 +350,7 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
 	return __yield_to_locked_owner(lock, val, paravirt, mustq);
 }
 
-static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *set_sleepy, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
 {
 	struct qnode *next;
 	int owner;
@@ -359,18 +359,17 @@ static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *
 		return;
 	if (!pv_yield_propagate_owner)
 		return;
-	if (*set_sleepy)
-		return;
 
 	next = READ_ONCE(node->next);
 	if (!next)
 		return;
 
+	if (next->sleepy)
+		return;
+
 	owner = get_owner_cpu(val);
-	if (vcpu_is_preempted(owner)) {
+	if (vcpu_is_preempted(owner))
 		next->sleepy = 1;
-		*set_sleepy = true;
-	}
 }
 
 /* Called inside spin_begin() */
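Note on the hunk above: the *set_sleepy out-parameter existed so a waiter would mark its successor only once; checking next->sleepy directly makes that bookkeeping unnecessary. A minimal sketch of the resulting control flow, assuming a stand-in struct qnode and an owner_preempted flag in place of vcpu_is_preempted(get_owner_cpu(val)):

#include <stdbool.h>
#include <stddef.h>

struct qnode {
	struct qnode *next;
	bool sleepy;
};

static void propagate_sleepy_sketch(struct qnode *node, bool owner_preempted)
{
	struct qnode *next = node->next;	/* READ_ONCE(node->next) in the kernel */

	if (!next)
		return;
	if (next->sleepy)		/* already marked: replaces *set_sleepy */
		return;
	if (owner_preempted)
		next->sleepy = true;
}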
@@ -385,12 +384,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if (!pv_yield_propagate_owner)
 		goto yield_prev;
 
-	if (!READ_ONCE(node->sleepy)) {
-		/* Propagate back sleepy==false */
-		if (node->next && node->next->sleepy)
-			node->next->sleepy = 0;
-		goto yield_prev;
-	} else {
+	if (node->sleepy) {
 		u32 val = READ_ONCE(lock->val);
 
 		if (val & _Q_LOCKED_VAL) {
@@ -410,6 +404,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 			if (preempted)
 				return preempted;
 		}
+		node->sleepy = false;
 	}
 
 yield_prev:
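Note on the two hunks above: rather than writing sleepy==false back into the successor's node, the waiter now consumes its own hint, yielding to a preempted owner at most once and then clearing node->sleepy locally. This is also what allows the "Clear out stale propagated sleepy" block to go away further down. A sketch of the reworked branch, where LOCKED and the yield stub are invented stand-ins for _Q_LOCKED_VAL and yield_to_preempted_owner():

#include <stdbool.h>

#define LOCKED	0x1u	/* stand-in for _Q_LOCKED_VAL */

struct qnode {
	struct qnode *next;
	bool sleepy;
};

/* Stub: the real yield_to_preempted_owner() yields the vCPU to the owner. */
static bool yield_to_preempted_owner_stub(unsigned int val)
{
	(void)val;
	return false;	/* pretend the owner was not preempted */
}

static bool sleepy_branch_sketch(struct qnode *node, unsigned int lock_val)
{
	bool preempted = false;

	if (node->sleepy) {
		if (lock_val & LOCKED) {
			preempted = yield_to_preempted_owner_stub(lock_val);
			if (preempted)
				return preempted;	/* returns before clearing, as in the hunk */
		}
		node->sleepy = false;	/* hint consumed; no propagation back */
	}
	return preempted;
}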
@@ -533,7 +528,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	bool sleepy = false;
 	bool mustq = false;
 	int idx;
-	bool set_sleepy = false;
 	int iters = 0;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -591,10 +585,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 		spec_barrier();
 		spin_end();
 
-		/* Clear out stale propagated sleepy */
-		if (paravirt && pv_yield_propagate_owner && node->sleepy)
-			node->sleepy = 0;
-
 		smp_rmb(); /* acquire barrier for the mcs lock */
 
 		/*
@@ -636,7 +626,7 @@ again:
 			}
 		}
 
-		propagate_sleepy(node, val, &set_sleepy, paravirt);
+		propagate_sleepy(node, val, paravirt);
 		preempted = yield_head_to_locked_owner(lock, val, paravirt);
 		if (!maybe_stealers)
 			continue;