Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of updates for the RT specific reader/writer locking base code:

   - Make the fast path reader ordering guarantees correct.

   - Code reshuffling to make the fix simpler"

[ This plays ugly games with atomic_add_return_release() because we
  don't have a plain atomic_add_release(), and should really be cleaned
  up, I think  - Linus ]

* tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwbase: Take care of ordering guarantee for fastpath reader
  locking/rwbase: Extract __rwbase_write_trylock()
  locking/rwbase: Properly match set_and_save_state() to restore_state()
commit f5e29a26c4
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -41,6 +41,12 @@
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
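The contract described in this new comment can be modeled outside the kernel with C11 atomics. Below is a minimal user-space sketch (not kernel code; the flag, the value 42, and all names are invented for the example) of why the reader's lock operation needs ACQUIRE and the writer's unlock needs RELEASE: a reader that observes the lock as free must also observe every store the writer made while holding it.

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int lock_free;	/* 1: writer gone, readers may enter */
	static int data;		/* state the lock protects */

	static void *writer(void *arg)
	{
		data = 42;	/* store inside the write-side critical section */
		/* RELEASE: orders the data store before the "unlocked" store */
		atomic_store_explicit(&lock_free, 1, memory_order_release);
		return NULL;
	}

	static void *reader(void *arg)
	{
		/* ACQUIRE: a reader that observes 1 also observes data == 42 */
		while (!atomic_load_explicit(&lock_free, memory_order_acquire))
			;	/* spin until the writer leaves */
		assert(data == 42);
		return NULL;
	}

	int main(void)
	{
		pthread_t r, w;

		pthread_create(&r, NULL, reader, NULL);
		pthread_create(&w, NULL, writer, NULL);
		pthread_join(r, NULL);
		pthread_join(w, NULL);
		puts("ACQUIRE/RELEASE pairing held");
		return 0;
	}

Built with "cc -std=c11 -pthread", the assert can never fire: the acquire load synchronizes with the release store, which is exactly the pairing the rwbase_rt fast paths must get from their atomic ops.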
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
 	 * set.
 	 */
 	for (r = atomic_read(&rwb->readers); r < 0;) {
+		/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
 		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
 			return 1;
 	}
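The comment added here relies on the rule that a fully-ordered read-modify-write, such as atomic_try_cmpxchg() when it succeeds, is at least as strong as an _acquire variant. A user-space sketch of the same fast-path shape, assuming a C11 seq_cst compare-exchange (the names and the "negative means readers may enter" convention are illustrative only):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int readers = -100;	/* negative: bias set, readers may enter */

	static bool read_trylock(void)
	{
		int r = atomic_load(&readers);

		while (r < 0) {
			/* seq_cst CAS: fully ordered on success, hence at least ACQUIRE */
			if (atomic_compare_exchange_weak(&readers, &r, r + 1))
				return true;
			/* on failure the CAS reloads r; the loop rechecks r < 0 */
		}
		return false;	/* a writer owns or is claiming the lock */
	}

	int main(void)
	{
		printf("trylock: %d, readers now %d\n",
		       read_trylock(), atomic_load(&readers));
		return 0;
	}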
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
 	/*
 	 * rwb->readers can only hit 0 when a writer is waiting for the
 	 * active readers to leave the critical section.
+	 *
+	 * dec_and_test() is fully ordered, provides RELEASE.
 	 */
 	if (unlikely(atomic_dec_and_test(&rwb->readers)))
 		__rwbase_read_unlock(rwb, state);
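The RELEASE provided by the decrement is what makes the wakeup safe: a writer that sees the reader count hit zero must also see everything those readers did. A user-space analogue of the "last reader out wakes the waiting writer" pattern, with a pthread condition variable standing in for the rtmutex machinery (all names invented for the sketch):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int readers;
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

	static void *reader(void *arg)
	{
		/* ... read-side critical section ... */

		/* seq_cst fetch_sub: at least RELEASE, like atomic_dec_and_test() */
		if (atomic_fetch_sub(&readers, 1) == 1) {
			pthread_mutex_lock(&m);
			pthread_cond_signal(&c);	/* last reader wakes the writer */
			pthread_mutex_unlock(&m);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[3];

		atomic_store(&readers, 3);
		for (int i = 0; i < 3; i++)
			pthread_create(&t[i], NULL, reader, NULL);

		pthread_mutex_lock(&m);
		while (atomic_load(&readers))	/* the "writer" waits for readers */
			pthread_cond_wait(&c, &m);
		pthread_mutex_unlock(&m);
		puts("writer may proceed");

		for (int i = 0; i < 3; i++)
			pthread_join(t[i], NULL);
		return 0;
	}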
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-	atomic_add(READER_BIAS - bias, &rwb->readers);
+	/*
+	 * _release() is needed in case that reader is in fast path, pairing
+	 * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+	 */
+	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	rwbase_rtmutex_unlock(rtm);
 }
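This is the line Linus' bracketed note refers to: the kernel's atomic API offers atomic_add() (no ordering) and atomic_add_return_release() (ordered, but it returns a value), yet no plain atomic_add_release(), hence the cast of the unused return value to void. For comparison, C11 can express a void-valued release add directly; a minimal sketch (readers_add_release() is an invented name):

	#include <stdatomic.h>

	static atomic_int readers;

	/* User-space analogue of "(void)atomic_add_return_release(...)":
	 * a release-ordered add whose result is simply ignored. */
	static void readers_add_release(int delta)
	{
		atomic_fetch_add_explicit(&readers, delta, memory_order_release);
	}

	int main(void)
	{
		readers_add_release(1);
		return atomic_load(&readers) == 1 ? 0 : 1;
	}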
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
 	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+	/* Can do without CAS because we're serialized by wait_lock. */
+	lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+	/*
+	 * _acquire is needed in case the reader is in the fast path, pairing
+	 * with rwbase_read_unlock(), provides ACQUIRE.
+	 */
+	if (!atomic_read_acquire(&rwb->readers)) {
+		atomic_set(&rwb->readers, WRITER_BIAS);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 				     unsigned int state)
 {
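The extracted helper pairs its _acquire load with the RELEASE half of the reader's decrement in rwbase_read_unlock(), and it may replace a CAS with a plain store only because every writer path serializes on wait_lock. A user-space sketch of that shape, with a pthread mutex standing in for wait_lock and an invented WRITER_BIAS value:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define WRITER_BIAS	(-1)	/* stand-in value, not the kernel's */

	static atomic_int readers;
	static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Caller must hold wait_lock, so no competing writer can race the
	 * load/store pair; the ACQUIRE load pairs with the readers' RELEASE. */
	static bool write_trylock_locked(void)
	{
		if (!atomic_load_explicit(&readers, memory_order_acquire)) {
			atomic_store_explicit(&readers, WRITER_BIAS,
					      memory_order_relaxed);
			return true;
		}
		return false;
	}

	int main(void)
	{
		pthread_mutex_lock(&wait_lock);
		bool ok = write_trylock_locked();
		pthread_mutex_unlock(&wait_lock);
		return ok ? 0 : 1;
	}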
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	/*
-	 * set_current_state() for rw_semaphore
-	 * current_save_and_set_rtlock_wait_state() for rwlock
-	 */
-	rwbase_set_and_save_current_state(state);
+	if (__rwbase_write_trylock(rwb))
+		goto out_unlock;
 
-	/* Block until all readers have left the critical section. */
-	for (; atomic_read(&rwb->readers);) {
+	rwbase_set_and_save_current_state(state);
+	for (;;) {
 		/* Optimized out for rwlocks */
 		if (rwbase_signal_pending_state(state, current)) {
-			__set_current_state(TASK_RUNNING);
+			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
 			return -EINTR;
 		}
+
+		if (__rwbase_write_trylock(rwb))
+			break;
+
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+		rwbase_schedule();
+		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-		/*
-		 * Schedule and wait for the readers to leave the critical
-		 * section. The last reader leaving it wakes the waiter.
-		 */
-		if (atomic_read(&rwb->readers) != 0)
-			rwbase_schedule();
 		set_current_state(state);
-		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 	}
-
-	atomic_set(&rwb->readers, WRITER_BIAS);
 	rwbase_restore_current_state();
+
+out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	return 0;
 }
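Besides switching to the extracted __rwbase_write_trylock(), this hunk fixes the -EINTR path: it previously forced TASK_RUNNING via __set_current_state(), discarding the state that rwbase_set_and_save_current_state() had saved, which matters for the rwlock variant (current_save_and_set_rtlock_wait_state(), per the removed comment). Every exit must use the matching restore operation. A toy model of that save/restore pairing (all names invented for the sketch):

	#include <stdio.h>

	static int task_state = 7;	/* arbitrary pre-existing state */
	static int saved_state;

	static void set_and_save_state(int new_state)
	{
		saved_state = task_state;	/* stash what we interrupted */
		task_state  = new_state;
	}

	static void restore_state(void)
	{
		task_state = saved_state;	/* the matching operation */
	}

	int main(void)
	{
		set_and_save_state(1);
		/* ... slow path runs, then bails out early ... */
		restore_state();	/* not task_state = 0; that would lose it */
		printf("state restored to %d\n", task_state);
		return 0;
	}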
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	if (!atomic_read(&rwb->readers)) {
-		atomic_set(&rwb->readers, WRITER_BIAS);
+	if (__rwbase_write_trylock(rwb)) {
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 		return 1;
 	}