mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-11 16:29:05 +00:00)
rcutorture: Replace barriers with smp_store_release() and smp_load_acquire()
The rcutorture.c file uses several explicit memory barriers that can easily be converted to smp_store_release() and smp_load_acquire(), which improves maintainability and also improves performance a bit.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent 61d49d2f98
commit 6c7ed42c81
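The pattern being adopted is the classic release/acquire handoff. As a rough illustration only (not code from rcutorture.c), here is a minimal userspace sketch using C11 <stdatomic.h> as a stand-in for the kernel's smp_store_release() and smp_load_acquire(); the pthread setup and all names below are assumptions made for the example.

/* Userspace sketch of the ->inflight-style handoff: the producer does its
 * work, then publishes a flag with a release store; the consumer checks the
 * flag with an acquire load before touching the work.  C11 atomics stand in
 * for smp_store_release()/smp_load_acquire(); names are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                /* plain data published by the flag */
static atomic_int inflight;        /* plays the role of ->inflight */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;              /* the "RCU core" work ... */
	/* ... is ordered before the flag update by the release store. */
	atomic_store_explicit(&inflight, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* The acquire load orders the flag check before the payload read. */
	while (!atomic_load_explicit(&inflight, memory_order_acquire))
		;                  /* spin until the producer publishes */
	printf("payload = %d\n", payload);   /* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

A consumer that observes inflight == 1 through the acquire load is also guaranteed to observe payload == 42; that one-way ordering is what allows the diff below to drop the heavier smp_mb() calls.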
@@ -672,8 +672,8 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
 	struct rcu_boost_inflight *rbip =
 		container_of(head, struct rcu_boost_inflight, rcu);
 
-	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
-	rbip->inflight = 0;
+	/* Ensure RCU-core accesses precede clearing ->inflight */
+	smp_store_release(&rbip->inflight, 0);
 }
 
 static int rcu_torture_boost(void *arg)
@@ -710,9 +710,9 @@ static int rcu_torture_boost(void *arg)
 		call_rcu_time = jiffies;
 		while (ULONG_CMP_LT(jiffies, endtime)) {
 			/* If we don't have a callback in flight, post one. */
-			if (!rbi.inflight) {
-				smp_mb(); /* RCU core before ->inflight = 1. */
-				rbi.inflight = 1;
+			if (!smp_load_acquire(&rbi.inflight)) {
+				/* RCU core before ->inflight = 1. */
+				smp_store_release(&rbi.inflight, 1);
 				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
 				if (jiffies - call_rcu_time >
 				    test_boost_duration * HZ - HZ / 2) {
@@ -751,11 +751,10 @@ checkwait: stutter_wait("rcu_torture_boost");
 	} while (!torture_must_stop());
 
 	/* Clean up and exit. */
-	while (!kthread_should_stop() || rbi.inflight) {
+	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
 		torture_shutdown_absorb("rcu_torture_boost");
 		schedule_timeout_uninterruptible(1);
 	}
-	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
 	destroy_rcu_head_on_stack(&rbi.rcu);
 	torture_kthread_stopping("rcu_torture_boost");
 	return 0;
@@ -1413,12 +1412,15 @@ static int rcu_torture_barrier_cbs(void *arg)
 	do {
 		wait_event(barrier_cbs_wq[myid],
 			   (newphase =
-			    READ_ONCE(barrier_phase)) != lastphase ||
+			    smp_load_acquire(&barrier_phase)) != lastphase ||
 			   torture_must_stop());
 		lastphase = newphase;
-		smp_mb(); /* ensure barrier_phase load before ->call(). */
 		if (torture_must_stop())
 			break;
+		/*
+		 * The above smp_load_acquire() ensures barrier_phase load
+		 * is ordered before the following ->call().
+		 */
 		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
 		if (atomic_dec_and_test(&barrier_cbs_count))
 			wake_up(&barrier_wq);
@@ -1439,8 +1441,8 @@ static int rcu_torture_barrier(void *arg)
 	do {
 		atomic_set(&barrier_cbs_invoked, 0);
 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
-		smp_mb(); /* Ensure barrier_phase after prior assignments. */
-		barrier_phase = !barrier_phase;
+		/* Ensure barrier_phase ordered after prior assignments. */
+		smp_store_release(&barrier_phase, !barrier_phase);
 		for (i = 0; i < n_barrier_cbs; i++)
 			wake_up(&barrier_cbs_wq[i]);
 		wait_event(barrier_wq,
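The last two hunks apply the same primitives to a phase-flip handshake: rcu_torture_barrier() publishes its prior assignments by flipping barrier_phase with smp_store_release(), and rcu_torture_barrier_cbs() waits for the phase to change via smp_load_acquire() before invoking ->call(). A rough userspace analogue, again with C11 atomics standing in for the kernel primitives and with purely illustrative names, might look like this:

/* Userspace sketch of the barrier_phase pattern: the main thread flips a
 * phase flag with a release store after its setup stores, and the callback
 * thread waits for the phase to change with an acquire load before acting
 * on that setup.  Names are illustrative, not taken from rcutorture.c. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int cbs_ready;              /* stands in for the atomic_set() setup */
static atomic_bool phase;          /* plays the role of barrier_phase */

static void *cb_thread(void *arg)
{
	bool lastphase = false;    /* matches the zero-initialized phase */

	(void)arg;
	/* wait_event() analogue: spin until the phase changes; the acquire
	 * load orders the phase check before the read of cbs_ready. */
	while (atomic_load_explicit(&phase, memory_order_acquire) == lastphase)
		;
	printf("new phase, cbs_ready = %d\n", cbs_ready);   /* sees 1 */
	return NULL;
}

int main(void)
{
	pthread_t t;
	bool newphase;

	pthread_create(&t, NULL, cb_thread, NULL);
	cbs_ready = 1;             /* the "prior assignments" ... */
	/* ... are ordered before the phase flip by the release store,
	 * mirroring smp_store_release(&barrier_phase, !barrier_phase). */
	newphase = !atomic_load_explicit(&phase, memory_order_relaxed);
	atomic_store_explicit(&phase, newphase, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}

Here the acquire load takes over the job of the old smp_mb() after the wait, and the release store takes over the job of the smp_mb() before the phase flip, pairing the two sides without a full barrier on either.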