locking/rwsem: Disable preemption in all down_write*() and up_write() code paths
The previous patch disabled preemption in all the down_read() and
up_read() code paths. For symmetry, this patch extends commit:

  48dfb5d256 ("locking/rwsem: Disable preemption while trying for rwsem lock")

... to have preemption disabled in all the down_write() and up_write()
code paths as well, including downgrade_write().
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126003628.365092-4-longman@redhat.com
parent 3f5245538a
commit 1d61659ced
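In net effect, the preempt_disable()/preempt_enable() pair moves out of
rwsem_write_trylock() and into its callers, so preemption stays disabled
across both the writer trylock fast path and the slowpath. A minimal
sketch of the resulting __down_write_common(), reconstructed from the
hunks below (note that in __up_write() the preempt_enable() likewise
moves to after the rwsem_wake() call):

/*
 * Resulting shape of the down_write() path after this patch:
 * the fast path and the slowpath both run with preemption
 * disabled end to end.
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	int ret = 0;

	preempt_disable();
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			ret = -EINTR;
	}
	preempt_enable();
	return ret;
}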
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
@@ -716,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disabling preemption is equal to an RCU read-side critical section,
 	 * thus the task_struct structure won't go away.
@@ -728,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -828,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -937,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
@@ -1178,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
@@ -1310,12 +1299,15 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
 
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
@@ -1330,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1374,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1394,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */