Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-29 17:25:38 +00:00)

commit d8dfba2c60
Merge branches 'rcu/fixes', 'rcu/nocb', 'rcu/torture', 'rcu/stall' and 'rcu/srcu' into rcu/dev
@@ -5421,6 +5421,14 @@
        The delay, in seconds, between successive
        read-then-exit testing episodes.

rcutorture.reader_flavor= [KNL]
        A bit mask indicating which readers to use.
        If there is more than one bit set, the readers
        are entered from low-order bit up, and are
        exited in the opposite order. For SRCU, the
        0x1 bit is normal readers, 0x2 NMI-safe readers,
        and 0x4 light-weight readers.

rcutorture.shuffle_interval= [KNL]
        Set task-shuffle interval (s). Shuffling tasks
        allows some CPUs to go into dyntick-idle mode
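Illustration (not part of the patch): the new mask composes, so a single SRCU rcutorture boot line can exercise normal and NMI-safe readers together, for example:

        rcutorture.torture_type=srcu rcutorture.reader_flavor=0x3

Here 0x3 sets the 0x1 (normal) and 0x2 (NMI-safe) bits; the SRCU-L scenario added by this series boots with reader_flavor=0x4 to exercise only the light-weight readers.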
@@ -165,7 +165,6 @@ static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
@@ -40,7 +40,6 @@ void kvfree_rcu_barrier(void);
void rcu_barrier(void);
void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);

struct rcu_gp_oldstate {
        unsigned long rgos_norm;
@@ -56,6 +56,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
#ifdef CONFIG_TINY_SRCU
#define __srcu_read_lock_lite __srcu_read_lock
#define __srcu_read_unlock_lite __srcu_read_unlock
#else // #ifdef CONFIG_TINY_SRCU
int __srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp);
#endif // #else // #ifdef CONFIG_TINY_SRCU
void synchronize_srcu(struct srcu_struct *ssp);

#define SRCU_GET_STATE_COMPLETED 0x1
@@ -176,17 +183,6 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#define SRCU_NMI_UNKNOWN 0x0
#define SRCU_NMI_UNSAFE 0x1
#define SRCU_NMI_SAFE 0x2

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
#else
static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
                                         bool nmi_safe) { }
#endif


/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -236,33 +232,67 @@ static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 * The return value from srcu_read_lock() must be passed unaltered
 * to the matching srcu_read_unlock(). Note that srcu_read_lock() and
 * the matching srcu_read_unlock() must occur in the same context, for
 * example, it is illegal to invoke srcu_read_unlock() in an irq handler
 * if the matching srcu_read_lock() was invoked in process context. Or,
 * for that matter to invoke srcu_read_unlock() from one task and the
 * matching srcu_read_lock() from another.
 */
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
        int retval;

        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        retval = __srcu_read_lock(ssp);
        srcu_lock_acquire(&ssp->dep_map);
        return retval;
}

/**
 * srcu_read_lock_lite - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but for a light-weight
 * smp_mb()-free reader. See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_lite() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after. Note that grace-period auto-expediting is disabled for _lite
 * srcu_struct structures because auto-expedited grace periods invoke
 * synchronize_rcu_expedited(), IPIs and all.
 *
 * Note that srcu_read_lock_lite() can be invoked only from those contexts
 * where RCU is watching, that is, from contexts where it would be legal
 * to invoke rcu_read_lock(). Otherwise, lockdep will complain.
 */
static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp)
{
        int retval;

        srcu_check_read_flavor_lite(ssp);
        retval = __srcu_read_lock_lite(ssp);
        rcu_try_lock_acquire(&ssp->dep_map);
        return retval;
}

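Illustration (not part of the patch): a minimal reader built on the new _lite API. The srcu_struct my_srcu, the payload struct foo, and the protected pointer gp are all hypothetical; the only reader flavor ever used on my_srcu must then be _lite, and the calls must run where RCU is watching.

        struct foo { int x; };                          /* hypothetical payload */
        struct foo __rcu *gp;                           /* hypothetical SRCU-protected pointer */
        DEFINE_SRCU(my_srcu);                           /* hypothetical srcu_struct */

        static void reader_fn(void)
        {
                struct foo *p;
                int idx;

                idx = srcu_read_lock_lite(&my_srcu);    /* smp_mb()-free entry */
                p = srcu_dereference(gp, &my_srcu);     /* fetch the protected pointer */
                if (p)
                        pr_info("x=%d\n", p->x);
                srcu_read_unlock_lite(&my_srcu, idx);   /* index passed back unaltered */
        }

Updaters on my_srcu still use call_srcu() or synchronize_srcu(); only the reader-side entry and exit change.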
/**
 * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
 * @ssp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section, but in an NMI-safe manner.
 * See srcu_read_lock() for more information.
 *
 * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
 * then none of the other flavors may be used, whether before, during,
 * or after.
 */
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
        int retval;

        srcu_check_nmi_safety(ssp, true);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
        retval = __srcu_read_lock_nmisafe(ssp);
        rcu_try_lock_acquire(&ssp->dep_map);
        return retval;
@@ -274,7 +304,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
        int retval;

        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        retval = __srcu_read_lock(ssp);
        return retval;
}
@@ -303,7 +333,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
        WARN_ON_ONCE(in_nmi());
        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        return __srcu_read_lock(ssp);
}

@@ -318,11 +348,27 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
        __releases(ssp)
{
        WARN_ON_ONCE(idx & ~0x1);
        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        srcu_lock_release(&ssp->dep_map);
        __srcu_read_unlock(ssp, idx);
}

/**
 * srcu_read_unlock_lite - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit a light-weight SRCU read-side critical section.
 */
static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
        __releases(ssp)
{
        WARN_ON_ONCE(idx & ~0x1);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
        srcu_lock_release(&ssp->dep_map);
        __srcu_read_unlock_lite(ssp, idx);
}

/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
 * @ssp: srcu_struct in which to unregister the old reader.
@@ -334,7 +380,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
        __releases(ssp)
{
        WARN_ON_ONCE(idx & ~0x1);
        srcu_check_nmi_safety(ssp, true);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
        rcu_lock_release(&ssp->dep_map);
        __srcu_read_unlock_nmisafe(ssp, idx);
}
@@ -343,7 +389,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        __srcu_read_unlock(ssp, idx);
}

@@ -360,7 +406,7 @@ static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
{
        WARN_ON_ONCE(idx & ~0x1);
        WARN_ON_ONCE(in_nmi());
        srcu_check_nmi_safety(ssp, false);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        __srcu_read_unlock(ssp, idx);
}

@@ -81,6 +81,9 @@ static inline void srcu_barrier(struct srcu_struct *ssp)
        synchronize_srcu(ssp);
}

#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
#define srcu_check_read_flavor_lite(ssp) do { } while (0)

/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
                                            char *tt, char *tf)
@@ -25,7 +25,7 @@ struct srcu_data {
        /* Read-side state. */
        atomic_long_t srcu_lock_count[2];       /* Locks per CPU. */
        atomic_long_t srcu_unlock_count[2];     /* Unlocks per CPU. */
        int srcu_nmi_safety;                    /* NMI-safe srcu_struct structure? */
        int srcu_reader_flavor;                 /* Reader flavor for srcu_struct structure? */

        /* Update-side state. */
        spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -43,6 +43,11 @@ struct srcu_data {
        struct srcu_struct *ssp;
};

/* Values for ->srcu_reader_flavor. */
#define SRCU_READ_FLAVOR_NORMAL 0x1     // srcu_read_lock().
#define SRCU_READ_FLAVOR_NMI    0x2     // srcu_read_lock_nmisafe().
#define SRCU_READ_FLAVOR_LITE   0x4     // srcu_read_lock_lite().

/*
 * Node in SRCU combining tree, similar in function to rcu_data.
 */
@@ -204,4 +209,64 @@ void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Returns an index that must be passed to the matching
 * srcu_read_unlock_lite().
 *
 * Note that this_cpu_inc() is an RCU read-side critical section either
 * because it disables interrupts, because it is a single instruction,
 * or because it is a read-modify-write atomic operation, depending on
 * the whims of the architecture.
 */
static inline int __srcu_read_lock_lite(struct srcu_struct *ssp)
{
        int idx;

        RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite().");
        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
        this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); /* Y */
        barrier(); /* Avoid leaking the critical section. */
        return idx;
}

/*
 * Removes the count for the old reader from the appropriate
 * per-CPU element of the srcu_struct. Note that this may well be a
 * different CPU than that which was incremented by the corresponding
 * srcu_read_lock_lite(), but it must be within the same task.
 *
 * Note that this_cpu_inc() is an RCU read-side critical section either
 * because it disables interrupts, because it is a single instruction,
 * or because it is a read-modify-write atomic operation, depending on
 * the whims of the architecture.
 */
static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx)
{
        barrier(); /* Avoid leaking the critical section. */
        this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter); /* Z */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite().");
}

void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);

// Record _lite() usage even for CONFIG_PROVE_RCU=n kernels.
static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp)
{
        struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

        if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE))
                return;

        // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered.
        __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
}

// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels.
static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
{
        if (IS_ENABLED(CONFIG_PROVE_RCU))
                __srcu_check_read_flavor(ssp, read_flavor);
}

#endif
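Illustration (not part of the patch): what the per-CPU flavor tracking is designed to catch. Assuming CONFIG_PROVE_RCU=y and the hypothetical my_srcu from the earlier sketch, mixing reader flavors on one srcu_struct is now diagnosed, either by the WARN_ONCE() in __srcu_check_read_flavor() or by the mixed-flavor check in the grace-period code:

        /* Hypothetical; both calls on the same CPU for the immediate warning. */
        static void mixed_flavor_readers(void)
        {
                int i1, i2;

                i1 = srcu_read_lock(&my_srcu);          /* records SRCU_READ_FLAVOR_NORMAL */
                srcu_read_unlock(&my_srcu, i1);

                i2 = srcu_read_lock_lite(&my_srcu);     /* flavor mismatch: __srcu_check_read_flavor() WARNs */
                srcu_read_unlock_lite(&my_srcu, i2);
        }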
@@ -120,7 +120,6 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
@@ -889,14 +889,14 @@ kfree_scale_init(void)

        if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
                pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
                WARN_ON_ONCE(1);
                return -1;
                firsterr = -1;
                goto unwind;
        }

        if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
                pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
                WARN_ON_ONCE(1);
                return -1;
                firsterr = -1;
                goto unwind;
        }
}

@@ -57,9 +57,9 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1  8       /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1   (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2  9       /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2   (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_MASK_1   (0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2  16      /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2   (0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH       0x01    /* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ      0x02    /* ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT  0x04    /* ... disabling preemption. */
@@ -71,6 +71,9 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@
#define RCUTORTURE_MAX_EXTEND \
        (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
         RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_ALLBITS \
        (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
         RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7    /* Maximum reader extensions. */
                                        /* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
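Illustration (not part of the patch text itself): why RCUTORTURE_RDR_MASK_1/2 grew from one bit to a full byte. With the new srcu_torture_read_lock() later in this patch, cur_ops->readlock() can return a packed per-flavor value up to 0x7 rather than just 0 or 1, so rcutorture_one_extend() now keeps the whole value in each reader slot:

        idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
        cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);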
@@ -108,6 +111,7 @@ torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disab
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, 0x1, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -643,10 +647,25 @@ static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)

static int srcu_torture_read_lock(void)
{
        if (cur_ops == &srcud_ops)
                return srcu_read_lock_nmisafe(srcu_ctlp);
        else
                return srcu_read_lock(srcu_ctlp);
        int idx;
        int ret = 0;

        if ((reader_flavor & 0x1) || !(reader_flavor & 0x7)) {
                idx = srcu_read_lock(srcu_ctlp);
                WARN_ON_ONCE(idx & ~0x1);
                ret += idx;
        }
        if (reader_flavor & 0x2) {
                idx = srcu_read_lock_nmisafe(srcu_ctlp);
                WARN_ON_ONCE(idx & ~0x1);
                ret += idx << 1;
        }
        if (reader_flavor & 0x4) {
                idx = srcu_read_lock_lite(srcu_ctlp);
                WARN_ON_ONCE(idx & ~0x1);
                ret += idx << 2;
        }
        return ret;
}

static void
@@ -670,10 +689,13 @@ srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)

static void srcu_torture_read_unlock(int idx)
{
        if (cur_ops == &srcud_ops)
                srcu_read_unlock_nmisafe(srcu_ctlp, idx);
        else
                srcu_read_unlock(srcu_ctlp, idx);
        WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
        if (reader_flavor & 0x4)
                srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2);
        if (reader_flavor & 0x2)
                srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
        if ((reader_flavor & 0x1) || !(reader_flavor & 0x7))
                srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

static int torture_srcu_read_lock_held(void)
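Illustration (not part of the patch): how the packed return value lines up with the reader_flavor bits. Each enabled flavor stores its own srcu_read_lock*() index in its flavor's bit position, readers are entered from the low-order bit up, and srcu_torture_read_unlock() peels them off in the reverse order:

        /* Hypothetical helper: decode the value built by srcu_torture_read_lock(). */
        static void decode_reader_idx(int ret)
        {
                int normal_idx  = (ret & 0x1) >> 0;     /* from srcu_read_lock() */
                int nmisafe_idx = (ret & 0x2) >> 1;     /* from srcu_read_lock_nmisafe() */
                int lite_idx    = (ret & 0x4) >> 2;     /* from srcu_read_lock_lite() */

                pr_info("normal=%d nmisafe=%d lite=%d\n", normal_idx, nmisafe_idx, lite_idx);
        }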
@@ -1061,8 +1083,13 @@ static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *star
        // At most one persisted message per boost test.
        j = jiffies;
        lp = READ_ONCE(last_persist);
        if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
        if (time_after(j, lp + mininterval) &&
            cmpxchg(&last_persist, lp, j) == lp) {
                if (cpu < 0)
                        pr_info("Boost inversion persisted: QS from all CPUs\n");
                else
                        pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
        }
        return false; // passed on a technicality
        }
        VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
@@ -1830,7 +1857,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
        int statesold = *readstate & ~newstate;

        WARN_ON_ONCE(idxold2 < 0);
        WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
        WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
        rtrsp->rt_readstate = newstate;

        /* First, put new protection in place to avoid critical-section gap. */
@@ -1845,9 +1872,9 @@ static void rcutorture_one_extend(int *readstate, int newstate,
        if (statesnew & RCUTORTURE_RDR_SCHED)
                rcu_read_lock_sched();
        if (statesnew & RCUTORTURE_RDR_RCU_1)
                idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
                idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
        if (statesnew & RCUTORTURE_RDR_RCU_2)
                idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
                idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;

        /*
         * Next, remove old protection, in decreasing order of strength
@@ -1867,7 +1894,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
        if (statesold & RCUTORTURE_RDR_RBH)
                rcu_read_unlock_bh();
        if (statesold & RCUTORTURE_RDR_RCU_2) {
                cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
                cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
                WARN_ON_ONCE(idxnew2 != -1);
                idxold2 = 0;
        }
@@ -1877,7 +1904,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
        lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
        if (lockit)
                raw_spin_lock_irqsave(&current->pi_lock, flags);
        cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
        cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
        WARN_ON_ONCE(idxnew1 != -1);
        idxold1 = 0;
        if (lockit)
@@ -1892,16 +1919,13 @@ static void rcutorture_one_extend(int *readstate, int newstate,
        if (idxnew1 == -1)
                idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
        WARN_ON_ONCE(idxnew1 < 0);
        if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
                pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
        if (idxnew2 == -1)
                idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
        WARN_ON_ONCE(idxnew2 < 0);
        WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
        *readstate = idxnew1 | idxnew2 | newstate;
        WARN_ON_ONCE(*readstate < 0);
        if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
                pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
        if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
                pr_info("Unexpected readstate value of %#x\n", *readstate);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
@@ -1926,7 +1950,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
        unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
        unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

        WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
        WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits.
        /* Mostly only one bit (need preemption!), sometimes lots of bits. */
        if (!(randmask1 & 0x7))
                mask = mask & randmask2;
@@ -2399,6 +2423,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                 "n_barrier_cbs=%d "
                 "onoff_interval=%d onoff_holdoff=%d "
                 "read_exit_delay=%d read_exit_burst=%d "
                 "reader_flavor=%x "
                 "nocbs_nthreads=%d nocbs_toggle=%d "
                 "test_nmis=%d\n",
                 torture_type, tag, nrealreaders, nfakewriters,
@@ -2411,6 +2436,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                 n_barrier_cbs,
                 onoff_interval, onoff_holdoff,
                 read_exit_delay, read_exit_burst,
                 reader_flavor,
                 nocbs_nthreads, nocbs_toggle,
                 test_nmis);
}
@@ -75,6 +75,9 @@ MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Number of seconds to extend warm-up and cool-down for multiple guest OSes
torture_param(long, guest_os_delay, 0,
              "Number of seconds to extend warm-up/cool-down for multiple guest OSes.");
// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
              "Holdoff time before test start (s)");
@@ -212,6 +215,36 @@ static const struct ref_scale_ops srcu_ops = {
        .name           = "srcu"
};

static void srcu_lite_ref_scale_read_section(const int nloops)
{
        int i;
        int idx;

        for (i = nloops; i >= 0; i--) {
                idx = srcu_read_lock_lite(srcu_ctlp);
                srcu_read_unlock_lite(srcu_ctlp, idx);
        }
}

static void srcu_lite_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
        int i;
        int idx;

        for (i = nloops; i >= 0; i--) {
                idx = srcu_read_lock_lite(srcu_ctlp);
                un_delay(udl, ndl);
                srcu_read_unlock_lite(srcu_ctlp, idx);
        }
}

static const struct ref_scale_ops srcu_lite_ops = {
        .init           = rcu_sync_scale_init,
        .readsection    = srcu_lite_ref_scale_read_section,
        .delaysection   = srcu_lite_ref_scale_delay_section,
        .name           = "srcu-lite"
};

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
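Illustration (not part of the patch): the new ops-table entry is selected the same way as the existing ones. Assuming the usual refscale module parameters, something like

        modprobe refscale scale_type=srcu-lite loops=10000 nreaders=4

would exercise the srcu_read_lock_lite()/srcu_read_unlock_lite() fast path measured by srcu_lite_ref_scale_read_section().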
@@ -801,6 +834,18 @@ static void rcu_scale_one_reader(void)
                cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Warm up cache, or, if needed run a series of rcu_scale_one_reader()
// to allow multiple rcuscale guest OSes to collect mutually valid data.
static void rcu_scale_warm_cool(void)
{
        unsigned long jdone = jiffies + (guest_os_delay > 0 ? guest_os_delay * HZ : -1);

        do {
                rcu_scale_one_reader();
                cond_resched();
        } while (time_before(jiffies, jdone));
}

// Reader kthread. Repeatedly does empty RCU read-side
// critical section, minimizing update-side interference.
static int
@@ -829,7 +874,7 @@ ref_scale_reader(void *arg)
                goto end;

        // Make sure that the CPU is affinitized appropriately during testing.
        WARN_ON_ONCE(raw_smp_processor_id() != me);
        WARN_ON_ONCE(raw_smp_processor_id() != me % nr_cpu_ids);

        WRITE_ONCE(rt->start_reader, 0);
        if (!atomic_dec_return(&n_started))
@@ -957,6 +1002,7 @@ static int main_func(void *arg)
                schedule_timeout_uninterruptible(1);

        // Start exp readers up per experiment
        rcu_scale_warm_cool();
        for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
                if (torture_must_stop())
                        goto end;
@@ -987,6 +1033,7 @@ static int main_func(void *arg)

                result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
        }
        rcu_scale_warm_cool();

        // Print the average of all experiments
        SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
@@ -1082,9 +1129,10 @@ ref_scale_init(void)
        long i;
        int firsterr = 0;
        static const struct ref_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
                &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
                &typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
                &rcu_ops, &srcu_ops, &srcu_lite_ops, RCU_TRACE_OPS RCU_TASKS_OPS
                &refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
                &clock_ops, &jiffies_ops, &typesafe_ref_ops, &typesafe_lock_ops,
                &typesafe_seqlock_ops,
        };

        if (!torture_init_begin(scale_type, verbose))
@@ -128,7 +128,7 @@ static void init_srcu_struct_data(struct srcu_struct *ssp)
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
        WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
        BUILD_BUG_ON(ARRAY_SIZE(sdp->srcu_lock_count) !=
                     ARRAY_SIZE(sdp->srcu_unlock_count));
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(ssp->sda, cpu);
@@ -187,7 +187,7 @@ static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
        /* Each pass through this loop initializes one srcu_node structure. */
        srcu_for_each_node_breadth_first(ssp, snp) {
                spin_lock_init(&ACCESS_PRIVATE(snp, lock));
                WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                BUILD_BUG_ON(ARRAY_SIZE(snp->srcu_have_cbs) !=
                             ARRAY_SIZE(snp->srcu_data_have_cbs));
                for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
                        snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
@@ -419,41 +419,60 @@ static void check_init_srcu_struct(struct srcu_struct *ssp)
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 * Is the current or any upcoming grace period to be expedited?
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
static bool srcu_gp_is_expedited(struct srcu_struct *ssp)
{
        int cpu;
        unsigned long sum = 0;
        struct srcu_usage *sup = ssp->srcu_sup;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

                sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
        }
        return sum;
        return ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp));
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 * Computes approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx, and returns true if
 * the caller did the proper barrier (gp), and if the count of the locks
 * matches that of the unlocks passed in.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
static bool srcu_readers_lock_idx(struct srcu_struct *ssp, int idx, bool gp, unsigned long unlocks)
{
        int cpu;
        unsigned long mask = 0;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

                sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
                sum += atomic_long_read(&sdp->srcu_lock_count[idx]);
                if (IS_ENABLED(CONFIG_PROVE_RCU))
                        mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
                        mask = mask | READ_ONCE(sdp->srcu_reader_flavor);
        }
        WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
                  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
        WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)),
                  "Mixed reader flavors for srcu_struct at %ps.\n", ssp);
        if (mask & SRCU_READ_FLAVOR_LITE && !gp)
                return false;
        return sum == unlocks;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx, unsigned long *rdm)
{
        int cpu;
        unsigned long mask = 0;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

                sum += atomic_long_read(&sdp->srcu_unlock_count[idx]);
                mask = mask | READ_ONCE(sdp->srcu_reader_flavor);
        }
        WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask - 1)),
                  "Mixed reader flavors for srcu_struct at %ps.\n", ssp);
        *rdm = mask;
        return sum;
}

@@ -463,22 +482,28 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
        bool did_gp;
        unsigned long rdm;
        unsigned long unlocks;

        unlocks = srcu_readers_unlock_idx(ssp, idx);
        unlocks = srcu_readers_unlock_idx(ssp, idx, &rdm);
        did_gp = !!(rdm & SRCU_READ_FLAVOR_LITE);

        /*
         * Make sure that a lock is always counted if the corresponding
         * unlock is counted. Needs to be a smp_mb() as the read side may
         * contain a read from a variable that is written to before the
         * synchronize_srcu() in the write side. In this case smp_mb()s
         * A and B act like the store buffering pattern.
         * A and B (or X and Y) act like the store buffering pattern.
         *
         * This smp_mb() also pairs with smp_mb() C to prevent accesses
         * after the synchronize_srcu() from being executed before the
         * grace period ends.
         * This smp_mb() also pairs with smp_mb() C (or, in the case of X,
         * Z) to prevent accesses after the synchronize_srcu() from being
         * executed before the grace period ends.
         */
        if (!did_gp)
                smp_mb(); /* A */
        else
                synchronize_rcu(); /* X */

        /*
         * If the locks are the same as the unlocks, then there must have
@@ -536,7 +561,7 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
         * which are unlikely to be configured with an address space fully
         * populated with memory, at least not anytime soon.
         */
        return srcu_readers_lock_idx(ssp, idx) == unlocks;
        return srcu_readers_lock_idx(ssp, idx, did_gp, unlocks);
}

/**
@@ -554,12 +579,12 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

                sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
                sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
                sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
                sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
                sum += atomic_long_read(&sdp->srcu_lock_count[0]);
                sum += atomic_long_read(&sdp->srcu_lock_count[1]);
                sum -= atomic_long_read(&sdp->srcu_unlock_count[0]);
                sum -= atomic_long_read(&sdp->srcu_unlock_count[1]);
        }
        return sum;
}
@@ -622,7 +647,7 @@ static unsigned long srcu_get_delay(struct srcu_struct *ssp)
        unsigned long jbase = SRCU_INTERVAL;
        struct srcu_usage *sup = ssp->srcu_sup;

        if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
        if (srcu_gp_is_expedited(ssp))
                jbase = 0;
        if (rcu_seq_state(READ_ONCE(sup->srcu_gp_seq))) {
                j = jiffies - 1;
@@ -687,28 +712,28 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

#ifdef CONFIG_PROVE_RCU
/*
 * Check for consistent NMI safety.
 * Check for consistent reader flavor.
 */
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
{
        int nmi_safe_mask = 1 << nmi_safe;
        int old_nmi_safe_mask;
        int old_read_flavor;
        struct srcu_data *sdp;

        /* NMI-unsafe use in NMI is a bad sign */
        WARN_ON_ONCE(!nmi_safe && in_nmi());
        /* NMI-unsafe use in NMI is a bad sign, as is multi-bit read_flavor values. */
        WARN_ON_ONCE((read_flavor != SRCU_READ_FLAVOR_NMI) && in_nmi());
        WARN_ON_ONCE(read_flavor & (read_flavor - 1));

        sdp = raw_cpu_ptr(ssp->sda);
        old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
        if (!old_nmi_safe_mask) {
                WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
        old_read_flavor = READ_ONCE(sdp->srcu_reader_flavor);
        if (!old_read_flavor) {
                old_read_flavor = cmpxchg(&sdp->srcu_reader_flavor, 0, read_flavor);
                if (!old_read_flavor)
                        return;
        }
        WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
        WARN_ONCE(old_read_flavor != read_flavor, "CPU %d old state %d new state %d\n", sdp->cpu, old_read_flavor, read_flavor);
}
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
#endif /* CONFIG_PROVE_RCU */
EXPORT_SYMBOL_GPL(__srcu_check_read_flavor);

/*
 * Counts the new reader in the appropriate per-CPU element of the
@@ -867,7 +892,7 @@ static void srcu_gp_end(struct srcu_struct *ssp)
        spin_lock_irq_rcu_node(sup);
        idx = rcu_seq_state(sup->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
        if (ULONG_CMP_LT(READ_ONCE(sup->srcu_gp_seq), READ_ONCE(sup->srcu_gp_seq_needed_exp)))
        if (srcu_gp_is_expedited(ssp))
                cbdelay = 0;

        WRITE_ONCE(sup->srcu_last_gp_end, ktime_get_mono_fast_ns());
@@ -1122,6 +1147,8 @@ static void srcu_flip(struct srcu_struct *ssp)
         * it stays until either (1) Compilers learn about this sort of
         * control dependency or (2) Some production workload running on
         * a production system is unduly delayed by this slowpath smp_mb().
         * Except for _lite() readers, where it is inoperative, which
         * means that it is a good thing that it is redundant.
         */
        smp_mb(); /* E */ /* Pairs with B and C. */

@@ -1139,7 +1166,9 @@ static void srcu_flip(struct srcu_struct *ssp)
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 * If SRCU is likely idle, in other words, the next SRCU grace period
 * should be expedited, return true, otherwise return false. Except that
 * in the presence of _lite() readers, always return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
@@ -1159,7 +1188,7 @@ static void srcu_flip(struct srcu_struct *ssp)
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
static bool srcu_should_expedite(struct srcu_struct *ssp)
{
        unsigned long curseq;
        unsigned long flags;
@@ -1168,6 +1197,9 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
        unsigned long tlast;

        check_init_srcu_struct(ssp);
        /* If _lite() readers, don't do unsolicited expediting. */
        if (this_cpu_read(ssp->sda->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE)
                return false;
        /* If the local srcu_data structure has callbacks, not idle. */
        sdp = raw_cpu_ptr(ssp->sda);
        spin_lock_irqsave_rcu_node(sdp, flags);
@@ -1469,14 +1501,15 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request. This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it. Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 * If SRCU is likely idle as determined by srcu_should_expedite(),
 * expedite the first request. This semantic was provided by Classic SRCU,
 * and is relied upon by its users, so TREE SRCU must also provide it.
 * Note that detecting idleness is heuristic and subject to both false
 * positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
        if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
        if (srcu_should_expedite(ssp) || rcu_gp_is_expedited())
                synchronize_srcu_expedited(ssp);
        else
                __synchronize_srcu(ssp, true);
@@ -891,7 +891,18 @@ static void nocb_cb_wait(struct rcu_data *rdp)
        swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
                                            nocb_cb_wait_cond(rdp));
        if (kthread_should_park()) {
                /*
                 * kthread_park() must be preceded by an rcu_barrier().
                 * But yet another rcu_barrier() might have sneaked in between
                 * the barrier callback execution and the callbacks counter
                 * decrement.
                 */
                if (rdp->nocb_cb_sleep) {
                        rcu_nocb_lock_irqsave(rdp, flags);
                        WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
                        rcu_nocb_unlock_irqrestore(rdp, flags);
                        kthread_parkme();
                }
        } else if (READ_ONCE(rdp->nocb_cb_sleep)) {
                WARN_ON(signal_pending(current));
                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
@@ -76,36 +76,6 @@ int rcu_jiffies_till_stall_check(void)
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled. This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
        unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
        unsigned long j = jiffies;

        if (d < RCU_STALL_MIGHT_MIN)
                d = RCU_STALL_MIGHT_MIN;
        smp_mb(); // jiffies before .gp_seq to avoid false positives.
        if (!rcu_gp_in_progress())
                return false;
        // Long delays at this point avoids false positive, but a delay
        // of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
        smp_mb(); // .gp_seq before second .gp_start
        // And ditto here.
        return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
@@ -365,7 +335,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 * that don't support NMI-based stack dumps. The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
static void rcu_dump_cpu_stacks(unsigned long gp_seq)
{
        int cpu;
        unsigned long flags;
@@ -373,8 +343,15 @@ static void rcu_dump_cpu_stacks(void)

        rcu_for_each_leaf_node(rnp) {
                printk_deferred_enter();
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        if (gp_seq != data_race(rcu_state.gp_seq)) {
                                printk_deferred_exit();
                                pr_err("INFO: Stall ended during stack backtracing.\n");
                                return;
                        }
                        if (!(data_race(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu)))
                                continue;
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                if (cpu_is_offline(cpu))
                                        pr_err("Offline CPU %d blocking current GP.\n", cpu);
@@ -382,6 +359,7 @@ static void rcu_dump_cpu_stacks(void)
                                dump_cpu_task(cpu);
                        }
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                printk_deferred_exit();
        }
}
@@ -638,7 +616,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen,
               data_race(rcu_state.n_online_cpus)); // Diagnostic read
        if (ndetected) {
                rcu_dump_cpu_stacks();
                rcu_dump_cpu_stacks(gp_seq);

                /* Complain about tasks blocking the grace period. */
                rcu_for_each_leaf_node(rnp)
@@ -670,7 +648,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
        rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
static void print_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
        int cpu;
        unsigned long flags;
@@ -707,7 +685,7 @@ static void print_cpu_stall(unsigned long gps)
        rcu_check_gp_kthread_expired_fqs_timer();
        rcu_check_gp_kthread_starvation();

        rcu_dump_cpu_stacks();
        rcu_dump_cpu_stacks(gp_seq);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        /* Rewrite if needed in case of slow consoles. */
@@ -789,7 +767,8 @@ static void check_cpu_stall(struct rcu_data *rdp)
        gs2 = READ_ONCE(rcu_state.gp_seq);
        if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
            ULONG_CMP_GE(gps, js) ||
            !rcu_seq_state(gs2))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        jn = jiffies + ULONG_MAX / 2;
@@ -810,7 +789,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
                pr_err("INFO: %s detected stall, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
        } else if (self_detected) {
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(gps);
                print_cpu_stall(gs2, gps);
        } else {
                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(gs2, gps);
@@ -56,6 +56,8 @@ do
        echo > $i/kvm-test-1-run-qemu.sh.out
        export TORTURE_AFFINITY=
        kvm-get-cpus-script.sh $T/cpuarray.awk $T/cpubatches.awk $T/cpustate
        if test -z "${TORTURE_NO_AFFINITY}"
        then
                cat << ' ___EOF___' >> $T/cpubatches.awk
                END {
                        affinitylist = "";
@@ -77,6 +79,7 @@ do
                cpu_count="`grep '# TORTURE_CPU_COUNT=' $i/qemu-cmd | sed -e 's/^.*=//'`"
                affinity_export="`awk -f $T/cpubatches.awk -v cpu_count="$cpu_count" -v scenario=$i < /dev/null`"
                $affinity_export
        fi
        kvm-test-1-run-qemu.sh $i >> $i/kvm-test-1-run-qemu.sh.out 2>&1 &
done
for i in $runfiles
@@ -42,6 +42,7 @@ TORTURE_JITTER_STOP=""
TORTURE_KCONFIG_KASAN_ARG=""
TORTURE_KCONFIG_KCSAN_ARG=""
TORTURE_KMAKE_ARG=""
TORTURE_NO_AFFINITY=""
TORTURE_QEMU_MEM=512
torture_qemu_mem_default=1
TORTURE_REMOTE=
@@ -82,6 +83,7 @@ usage () {
        echo " --kmake-arg kernel-make-arguments"
        echo " --mac nn:nn:nn:nn:nn:nn"
        echo " --memory megabytes|nnnG"
        echo " --no-affinity"
        echo " --no-initrd"
        echo " --qemu-args qemu-arguments"
        echo " --qemu-cmd qemu-system-..."
@@ -220,6 +222,9 @@ do
                torture_qemu_mem_default=
                shift
                ;;
        --no-affinity)
                TORTURE_NO_AFFINITY="no-affinity"
                ;;
        --no-initrd)
                TORTURE_INITRD=""; export TORTURE_INITRD
                ;;
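Illustration (not part of the patch): the new switch is passed like any other kvm.sh option, for example

        tools/testing/selftests/rcutorture/bin/kvm.sh --cpus 16 --no-affinity --configs "SRCU-L SRCU-N"

which skips the automatic CPU-affinity assignment that would otherwise be exported for each launched scenario.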
@@ -417,6 +422,7 @@ TORTURE_KCONFIG_KASAN_ARG="$TORTURE_KCONFIG_KASAN_ARG"; export TORTURE_KCONFIG_K
TORTURE_KCONFIG_KCSAN_ARG="$TORTURE_KCONFIG_KCSAN_ARG"; export TORTURE_KCONFIG_KCSAN_ARG
TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG
TORTURE_MOD="$TORTURE_MOD"; export TORTURE_MOD
TORTURE_NO_AFFINITY="$TORTURE_NO_AFFINITY"; export TORTURE_NO_AFFINITY
TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD
TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE
TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC
@@ -5,6 +5,7 @@ TREE04
TREE05
TREE07
TREE09
SRCU-L
SRCU-N
SRCU-P
SRCU-T
tools/testing/selftests/rcutorture/configs/rcu/SRCU-L (new file, 10 lines)
@@ -0,0 +1,10 @@
CONFIG_RCU_TRACE=n
CONFIG_SMP=y
CONFIG_NR_CPUS=6
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
#CHECK#CONFIG_RCU_EXPERT=n
CONFIG_KPROBES=n
CONFIG_FTRACE=n
@@ -0,0 +1,3 @@
rcutorture.torture_type=srcu
rcutorture.reader_flavor=0x4
rcutorture.fwd_progress=3
@@ -1,2 +1,3 @@
rcutorture.torture_type=srcu
rcutorture.reader_flavor=0x2
rcutorture.fwd_progress=3
@@ -1,5 +1,5 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=56
CONFIG_NR_CPUS=74
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n