rcu: Remove fast check path from __synchronize_srcu()
The fastpath in __synchronize_srcu() is designed to handle cases where
there are a large number of concurrent calls for the same srcu_struct
structure. However, the Linux kernel currently does not use SRCU in
this manner, so remove the fastpath checks for simplicity.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
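For context, here is a minimal sketch of the usual SRCU pattern whose update side the removed fastpath was meant to optimize. All of the names below (my_data, my_srcu, my_ptr, my_read, my_update) are illustrative only and not part of this commit; the SRCU API calls themselves (srcu_read_lock(), srcu_dereference(), synchronize_srcu(), and friends) are the real kernel interfaces:

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical SRCU-protected data; illustrative only. */
struct my_data {
	int val;
};

static struct srcu_struct my_srcu;	/* assume init_srcu_struct(&my_srcu) ran at init */
static struct my_data __rcu *my_ptr;

/* Reader side: cheap, and unlike classic RCU it is allowed to block. */
static int my_read(void)
{
	int ret = 0;
	int idx = srcu_read_lock(&my_srcu);
	struct my_data *p = srcu_dereference(my_ptr, &my_srcu);

	if (p)
		ret = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return ret;
}

/* Update side: publish the new version, wait out old readers, then free. */
static void my_update(struct my_data *new)
{
	struct my_data *old = rcu_dereference_protected(my_ptr, 1);

	rcu_assign_pointer(my_ptr, new);
	synchronize_srcu(&my_srcu);	/* grace period: all pre-existing readers done */
	kfree(old);
}

Because updaters like my_update() are rare and rarely concurrent in current kernel usage, a fastpath tuned for many simultaneous synchronize_srcu() callers buys nothing, which is the commit's rationale for removing it.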
parent cef50120b6
commit 4b7a3e9e32
@@ -308,7 +308,7 @@ static void flip_idx_and_wait(struct srcu_struct *sp, bool expedited)
  */
 static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 {
-	int idx;
+	int idx = 0;
 
 	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
 			   !lock_is_held(&rcu_bh_lock_map) &&
@@ -316,31 +316,8 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
 			   !lock_is_held(&rcu_sched_lock_map),
 			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
 
-	smp_mb(); /* Ensure prior action happens before grace period. */
-	idx = ACCESS_ONCE(sp->completed);
-	smp_mb(); /* Access to ->completed before lock acquisition. */
 	mutex_lock(&sp->mutex);
 
-	/*
-	 * Check to see if someone else did the work for us while we were
-	 * waiting to acquire the lock.  We need -three- advances of
-	 * the counter, not just one.  If there was but one, we might have
-	 * shown up -after- our helper's first synchronize_sched(), thus
-	 * having failed to prevent CPU-reordering races with concurrent
-	 * srcu_read_unlock()s on other CPUs (see comment below).  If there
-	 * was only two, we are guaranteed to have waited through only one
-	 * full index-flip phase.  So we either (1) wait for three or
-	 * (2) supply the additional ones we need.
-	 */
-
-	if (sp->completed == idx + 2)
-		idx = 1;
-	else if (sp->completed == idx + 3) {
-		mutex_unlock(&sp->mutex);
-		return;
-	} else
-		idx = 0;
-
 	/*
 	 * If there were no helpers, then we need to do two flips of
 	 * the index.  The first flip is required if there are any
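The removed check is an instance of a general "helped along" fastpath: snapshot a generation counter before sleeping on the lock, and if other callers advanced it far enough while you waited, their grace periods already cover yours. Below is a simplified userspace model of that pattern, for illustration only; it is not the kernel code, and it collapses the kernel's exact idx + 2 / idx + 3 equality checks (and their memory-ordering subtleties) into a single difference test on a monotonic counter:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t gp_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_long gp_completed;	/* generation counter; two steps per grace period */

/* Stand-in for one flip-and-wait phase of the real grace-period machinery. */
static void do_one_phase(void)
{
	/* The real code waits for all pre-existing readers here. */
	atomic_fetch_add(&gp_completed, 1);
}

void synchronize_model(void)
{
	/* Snapshot the generation before sleeping on the lock. */
	long snap = atomic_load(&gp_completed);

	pthread_mutex_lock(&gp_mutex);

	/*
	 * Fastpath analogous to the one removed above: if concurrent
	 * callers pushed the counter far enough past our snapshot while
	 * we waited for the lock, their grace periods fully cover ours,
	 * so we can return without repeating the work.
	 */
	if (atomic_load(&gp_completed) - snap >= 3) {
		pthread_mutex_unlock(&gp_mutex);
		return;
	}

	/* Slowpath: run a full grace period ourselves (two phases). */
	do_one_phase();
	do_one_phase();
	pthread_mutex_unlock(&gp_mutex);
}

The fastpath only pays off when many threads call synchronize_model() at once; with rare, serialized updaters every caller misses the early-return test and simply takes the slowpath, which is why the kernel version was deleted as dead weight.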