Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-28 16:52:18 +00:00)

commit 2d119f6afa
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git

# Conflicts:
#	kernel/rcu/tree.c
Documentation/admin-guide/kernel-parameters.txt
@@ -5524,7 +5524,42 @@
 	rcutorture.gp_cond= [KNL]
 			Use conditional/asynchronous update-side
-			primitives, if available.
+			normal-grace-period primitives, if available.
 
+	rcutorture.gp_cond_exp= [KNL]
+			Use conditional/asynchronous update-side
+			expedited-grace-period primitives, if available.
+
+	rcutorture.gp_cond_full= [KNL]
+			Use conditional/asynchronous update-side
+			normal-grace-period primitives that also take
+			concurrent expedited grace periods into account,
+			if available.
+
+	rcutorture.gp_cond_exp_full= [KNL]
+			Use conditional/asynchronous update-side
+			expedited-grace-period primitives that also take
+			concurrent normal grace periods into account,
+			if available.
+
+	rcutorture.gp_cond_wi= [KNL]
+			Nominal wait interval for normal conditional
+			grace periods (specified by rcutorture's
+			gp_cond and gp_cond_full module parameters),
+			in microseconds.  The actual wait interval will
+			be randomly selected to nanosecond granularity up
+			to this wait interval.  Defaults to 16 jiffies,
+			for example, 16,000 microseconds on a system
+			with HZ=1000.
+
+	rcutorture.gp_cond_wi_exp= [KNL]
+			Nominal wait interval for expedited conditional
+			grace periods (specified by rcutorture's
+			gp_cond_exp and gp_cond_exp_full module
+			parameters), in microseconds.  The actual wait
+			interval will be randomly selected to nanosecond
+			granularity up to this wait interval.  Defaults to
+			128 microseconds.
+
 	rcutorture.gp_exp= [KNL]
 			Use expedited update-side primitives, if available.
@@ -5533,6 +5568,43 @@
 			Use normal (non-expedited) asynchronous
 			update-side primitives, if available.
 
+	rcutorture.gp_poll= [KNL]
+			Use polled update-side normal-grace-period
+			primitives, if available.
+
+	rcutorture.gp_poll_exp= [KNL]
+			Use polled update-side expedited-grace-period
+			primitives, if available.
+
+	rcutorture.gp_poll_full= [KNL]
+			Use polled update-side normal-grace-period
+			primitives that also take concurrent expedited
+			grace periods into account, if available.
+
+	rcutorture.gp_poll_exp_full= [KNL]
+			Use polled update-side expedited-grace-period
+			primitives that also take concurrent normal
+			grace periods into account, if available.
+
+	rcutorture.gp_poll_wi= [KNL]
+			Nominal wait interval for normal polled
+			grace periods (specified by rcutorture's
+			gp_poll and gp_poll_full module parameters),
+			in microseconds.  The actual wait interval will
+			be randomly selected to nanosecond granularity up
+			to this wait interval.  Defaults to 16 jiffies,
+			for example, 16,000 microseconds on a system
+			with HZ=1000.
+
+	rcutorture.gp_poll_wi_exp= [KNL]
+			Nominal wait interval for expedited polled
+			grace periods (specified by rcutorture's
+			gp_poll_exp and gp_poll_exp_full module
+			parameters), in microseconds.  The actual wait
+			interval will be randomly selected to nanosecond
+			granularity up to this wait interval.  Defaults to
+			128 microseconds.
+
 	rcutorture.gp_sync= [KNL]
 			Use normal (non-expedited) synchronous
 			update-side primitives, if available.  If all
@@ -5586,6 +5658,22 @@
 			Set time (jiffies) between CPU-hotplug operations,
 			or zero to disable CPU-hotplug testing.
 
+	rcutorture.preempt_duration= [KNL]
+			Set duration (in milliseconds) of preemptions
+			by a high-priority FIFO real-time task.  Set to
+			zero (the default) to disable.  The CPUs to
+			preempt are selected randomly from the set that
+			are online at a given point in time.  Races with
+			CPUs going offline are ignored, with that attempt
+			at preemption skipped.
+
+	rcutorture.preempt_interval= [KNL]
+			Set interval (in milliseconds, defaulting to one
+			second) between preemptions by a high-priority
+			FIFO real-time task.  This delay is mediated
+			by an hrtimer and is further fuzzed to avoid
+			inadvertent synchronizations.
+
 	rcutorture.read_exit_burst= [KNL]
 			The number of times in a given read-then-exit
 			episode that a set of read-then-exit kthreads
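Reviewer note on the defaults above: the 16-jiffy figure is converted to microseconds using the running kernel's HZ, and each individual wait is then drawn uniformly below that bound. A standalone sketch of the arithmetic (illustrative only; HZ and USEC_PER_SEC here stand in for the kernel's definitions):

	#include <stdio.h>
	#include <stdlib.h>

	#define HZ 1000                 /* assumed tick rate */
	#define USEC_PER_SEC 1000000L

	int main(void)
	{
		/* Nominal interval: 16 jiffies in microseconds (16,000us at HZ=1000). */
		long gp_cond_wi = 16 * USEC_PER_SEC / HZ;

		/* Actual wait: uniform below the nominal bound, mirroring
		 * torture_random(&rand) % gp_cond_wi in rcutorture. */
		long actual_us = rand() % gp_cond_wi;

		printf("nominal %ld us, this wait %ld us\n", gp_cond_wi, actual_us);
		return 0;
	}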
MAINTAINERS
@@ -13343,7 +13343,7 @@ L: linux-kernel@vger.kernel.org
 L:	linux-arch@vger.kernel.org
 L:	lkmm@lists.linux.dev
 S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
 F:	Documentation/atomic_bitops.txt
 F:	Documentation/atomic_t.txt
 F:	Documentation/core-api/refcount-vs-atomic.rst
@@ -19674,7 +19674,7 @@ R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 R:	Lai Jiangshan <jiangshanlai@gmail.com>
 L:	rcu@vger.kernel.org
 S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
 F:	tools/testing/selftests/rcutorture
 
 RDACM20 Camera Sensor
@@ -19753,7 +19753,7 @@ R: Zqiang <qiang.zhang1211@gmail.com>
 L:	rcu@vger.kernel.org
 S:	Supported
 W:	http://www.rdrop.com/users/paulmck/RCU/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
 F:	Documentation/RCU/
 F:	include/linux/rcu*
 F:	kernel/rcu/
@@ -21658,7 +21658,7 @@ R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:	rcu@vger.kernel.org
 S:	Supported
 W:	http://www.rdrop.com/users/paulmck/RCU/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
 F:	include/linux/srcu*.h
 F:	kernel/rcu/srcu*.c
 
@@ -23789,7 +23789,7 @@ M: "Paul E. McKenney" <paulmck@kernel.org>
 M:	Josh Triplett <josh@joshtriplett.org>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
 F:	Documentation/RCU/torture.rst
 F:	kernel/locking/locktorture.c
 F:	kernel/rcu/rcuscale.c
include/linux/rcupdate_wait.h
@@ -65,4 +65,15 @@ static inline void cond_resched_rcu(void)
 #endif
 }
 
+// Has the current task blocked within its current RCU read-side
+// critical section?
+static inline bool has_rcu_reader_blocked(void)
+{
+#ifdef CONFIG_PREEMPT_RCU
+	return !list_empty(&current->rcu_node_entry);
+#else
+	return false;
+#endif
+}
+
 #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
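Reviewer note: a hedged sketch of how a caller might use the new helper (example_reader_probe() is hypothetical, not part of this patch). Under CONFIG_PREEMPT_RCU=y a preempted reader is queued via current->rcu_node_entry, which is exactly what the helper tests; in all other configurations it is constantly false.

	// Illustrative only; assumes CONFIG_PREEMPT_RCU=y for a meaningful answer.
	static void example_reader_probe(void)
	{
		rcu_read_lock();
		if (has_rcu_reader_blocked())
			pr_info("reader was preempted within this critical section\n");
		rcu_read_unlock();
	}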
include/linux/srcu.h
@@ -43,6 +43,12 @@ int init_srcu_struct(struct srcu_struct *ssp);
 #define __SRCU_DEP_MAP_INIT(srcu_name)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
+#define SRCU_READ_FLAVOR_NORMAL	0x1	// srcu_read_lock().
+#define SRCU_READ_FLAVOR_NMI	0x2	// srcu_read_lock_nmisafe().
+#define SRCU_READ_FLAVOR_LITE	0x4	// srcu_read_lock_lite().
+#define SRCU_READ_FLAVOR_ALL	0x7	// All of the above.
+
 #ifdef CONFIG_TINY_SRCU
 #include <linux/srcutiny.h>
 #elif defined(CONFIG_TREE_SRCU)
@@ -232,13 +238,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
  * a mutex that is held elsewhere while calling synchronize_srcu() or
  * synchronize_srcu_expedited().
  *
- * The return value from srcu_read_lock() must be passed unaltered
- * to the matching srcu_read_unlock().  Note that srcu_read_lock() and
- * the matching srcu_read_unlock() must occur in the same context, for
- * example, it is illegal to invoke srcu_read_unlock() in an irq handler
- * if the matching srcu_read_lock() was invoked in process context.  Or,
- * for that matter to invoke srcu_read_unlock() from one task and the
- * matching srcu_read_lock() from another.
+ * The return value from srcu_read_lock() is guaranteed to be
+ * non-negative.  This value must be passed unaltered to the matching
+ * srcu_read_unlock().  Note that srcu_read_lock() and the matching
+ * srcu_read_unlock() must occur in the same context, for example, it is
+ * illegal to invoke srcu_read_unlock() in an irq handler if the matching
+ * srcu_read_lock() was invoked in process context.  Or, for that matter to
+ * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
+ * from another.
  */
 static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
 {
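Reviewer note: a minimal usage sketch of the documented contract (my_srcu and the function are illustrative, not from this patch). The index is non-negative and must come back unaltered to srcu_read_unlock(), in the same task and the same context.

	DEFINE_SRCU(my_srcu);	// Hypothetical srcu_struct for illustration.

	static void example_srcu_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);		// Guaranteed non-negative.
		/* SRCU read-side work here; may block, unlike plain RCU readers. */
		srcu_read_unlock(&my_srcu, idx);	// Same task, same context, same idx.
	}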
include/linux/srcutree.h
@@ -26,6 +26,7 @@ struct srcu_data {
 	atomic_long_t srcu_lock_count[2];	/* Locks per CPU. */
 	atomic_long_t srcu_unlock_count[2];	/* Unlocks per CPU. */
 	int srcu_reader_flavor;			/* Reader flavor for srcu_struct structure? */
+						/* Values: SRCU_READ_FLAVOR_.* */
 
 	/* Update-side state. */
 	spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -43,11 +44,6 @@ struct srcu_data {
 	struct srcu_struct *ssp;
 };
 
-/* Values for ->srcu_reader_flavor. */
-#define SRCU_READ_FLAVOR_NORMAL	0x1	// srcu_read_lock().
-#define SRCU_READ_FLAVOR_NMI	0x2	// srcu_read_lock_nmisafe().
-#define SRCU_READ_FLAVOR_LITE	0x4	// srcu_read_lock_lite().
-
 /*
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
@@ -258,7 +254,7 @@ static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp)
 	if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE))
 		return;
 
-	// Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered.
+	// Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
 	__srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
 }
include/linux/torture.h
@@ -130,7 +130,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
 #endif
 
 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 #endif
 
 #endif /* __LINUX_TORTURE_H */
kernel/locking/locktorture.c
@@ -106,7 +106,7 @@ static const struct kernel_param_ops lt_bind_ops = {
 module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
 module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
 
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 
 static struct task_struct *stats_task;
 static struct task_struct **writer_tasks;
@@ -1358,7 +1358,7 @@ static int __init lock_torture_init(void)
 		if (torture_init_error(firsterr))
 			goto unwind;
 		if (cpumask_nonempty(bind_writers))
-			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
+			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers, true);
 
 	create_reader:
 		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
@@ -1369,7 +1369,7 @@ static int __init lock_torture_init(void)
 		if (torture_init_error(firsterr))
 			goto unwind;
 		if (cpumask_nonempty(bind_readers))
-			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
+			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers, true);
 	}
 	if (stat_interval > 0) {
 		firsterr = torture_create_kthread(lock_torture_stats, NULL,
kernel/rcu/Kconfig.debug
@@ -53,6 +53,37 @@ config RCU_TORTURE_TEST
 	  Say M if you want the RCU torture tests to build as a module.
 	  Say N if you are unsure.
 
+config RCU_TORTURE_TEST_CHK_RDR_STATE
+	tristate "Check rcutorture reader state"
+	depends on RCU_TORTURE_TEST
+	default n
+	help
+	  This option causes rcutorture to check the desired rcutorture
+	  reader state for each segment against the actual context.
+	  Note that PREEMPT_COUNT must be enabled if the preempt-disabled
+	  and bh-disabled checks are to take effect, and that PREEMPT_RCU
+	  must be enabled for the RCU-nesting checks to take effect.
+	  These checks add overhead, and this Kconfig option is therefore
+	  disabled by default.
+
+	  Say Y here if you want rcutorture reader contexts checked.
+	  Say N if you are unsure.
+
+config RCU_TORTURE_TEST_LOG_CPU
+	tristate "Log CPU for rcutorture failures"
+	depends on RCU_TORTURE_TEST
+	default n
+	help
+	  This option causes rcutorture to decorate each entry of its
+	  log of failure/close-call rcutorture reader segments with the
+	  number of the CPU that the reader was running on at the time.
+	  This information can be useful, but it does incur additional
+	  overhead, overhead that can make both failures and close calls
+	  less probable.
+
+	  Say Y here if you want CPU IDs logged.
+	  Say N if you are unsure.
+
 config RCU_REF_SCALE_TEST
 	tristate "Scalability tests for read-side synchronization (RCU and others)"
 	depends on DEBUG_KERNEL
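Reviewer note: a hypothetical .config fragment enabling both new checkers alongside a modular rcutorture build (both options are limited to m when RCU_TORTURE_TEST=m; the overhead caveats in the help text apply):

	CONFIG_RCU_TORTURE_TEST=m
	CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE=m
	CONFIG_RCU_TORTURE_TEST_LOG_CPU=m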
kernel/rcu/rcutorture.c
@@ -92,12 +92,20 @@ torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait
 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
 torture_param(bool, gp_cond_exp_full, false,
 	      "Use conditional/async full-stateexpedited GP wait primitives");
+torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
+	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
+torture_param(int, gp_cond_wi_exp, 128,
+	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
+torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
+	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
+torture_param(int, gp_poll_wi_exp, 128,
+	      "Wait interval for expedited polled grace periods, us (default 128 us)");
 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
@@ -109,9 +117,11 @@ torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
+torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
+torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
-torture_param(int, reader_flavor, 0x1, "Reader flavors to use, one per bit.");
+torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
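Reviewer note: taken together, a hypothetical invocation exercising the new knobs might look like the following (parameter values are illustrative only):

	modprobe rcutorture gp_cond=1 gp_cond_wi=8000 preempt_duration=10 preempt_interval=500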
@@ -149,6 +159,7 @@ static struct task_struct **fwd_prog_tasks;
 static struct task_struct **barrier_cbs_tasks;
 static struct task_struct *barrier_task;
 static struct task_struct *read_exit_task;
+static struct task_struct *preempt_task;
 
 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -259,10 +270,13 @@ struct rt_read_seg {
 	unsigned long rt_delay_ms;
 	unsigned long rt_delay_us;
 	bool rt_preempted;
+	int rt_cpu;
+	int rt_end_cpu;
 };
 static int err_segs_recorded;
 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
 static int rt_read_nsegs;
+static int rt_read_preempted;
 
 static const char *rcu_torture_writer_state_getname(void)
 {
@@ -353,7 +367,8 @@ struct rcu_torture_ops {
 	void (*read_delay)(struct torture_random_state *rrsp,
 			   struct rt_read_seg *rtrsp);
 	void (*readunlock)(int idx);
-	int (*readlock_held)(void);
+	int (*readlock_held)(void);	// lockdep.
+	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
 	unsigned long (*get_gp_seq)(void);
 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 	void (*deferred_free)(struct rcu_torture *p);
@@ -390,6 +405,7 @@ struct rcu_torture_ops {
 	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
 	void (*gp_slow_register)(atomic_t *rgssp);
 	void (*gp_slow_unregister)(atomic_t *rgssp);
+	bool (*reader_blocked)(void);
 	long cbflood_max;
 	int irq_capable;
 	int can_boost;
@@ -448,10 +464,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 		rtrsp->rt_delay_us = shortdelay_us;
 	}
 	if (!preempt_count() &&
-	    !(torture_random(rrsp) % (nrealreaders * 500))) {
+	    !(torture_random(rrsp) % (nrealreaders * 500)))
 		torture_preempt_schedule(); /* QS only if preemptible. */
-		rtrsp->rt_preempted = true;
-	}
 }
 
 static void rcu_torture_read_unlock(int idx)
@@ -459,6 +473,15 @@ static void rcu_torture_read_unlock(int idx)
 	rcu_read_unlock();
 }
 
+static int rcu_torture_readlock_nesting(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+		return rcu_preempt_depth();
+	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return (preempt_count() & PREEMPT_MASK);
+	return -1;
+}
+
 /*
  * Update callback in the pipe.  This should be invoked after a grace period.
  */
@@ -548,6 +571,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.read_delay		= rcu_read_delay,
 	.readunlock		= rcu_torture_read_unlock,
 	.readlock_held		= torture_readlock_not_held,
+	.readlock_nesting	= rcu_torture_readlock_nesting,
 	.get_gp_seq		= rcu_get_gp_seq,
 	.gp_diff		= rcu_seq_diff,
 	.deferred_free		= rcu_torture_deferred_free,
@@ -573,6 +597,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
 	.poll_gp_state_exp	= poll_state_synchronize_rcu,
 	.cond_sync_exp		= cond_synchronize_rcu_expedited,
+	.cond_sync_exp_full	= cond_synchronize_rcu_expedited_full,
 	.call			= call_rcu_hurry,
 	.cb_barrier		= rcu_barrier,
 	.fqs			= rcu_force_quiescent_state,
@@ -582,6 +607,9 @@ static struct rcu_torture_ops rcu_ops = {
 	.get_gp_data		= rcutorture_get_gp_data,
 	.gp_slow_register	= rcu_gp_slow_register,
 	.gp_slow_unregister	= rcu_gp_slow_unregister,
+	.reader_blocked		= IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
+				  ? has_rcu_reader_blocked
+				  : NULL,
 	.irq_capable		= 1,
 	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
 	.extendables		= RCUTORTURE_MAX_EXTEND,
@@ -628,6 +656,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
 	.exp_sync	= synchronize_rcu_busted,
 	.call		= call_rcu_busted,
 	.irq_capable	= 1,
+	.extendables	= RCUTORTURE_MAX_EXTEND,
 	.name		= "busted"
 };
 
@@ -650,17 +679,17 @@ static int srcu_torture_read_lock(void)
 	int idx;
 	int ret = 0;
 
-	if ((reader_flavor & 0x1) || !(reader_flavor & 0x7)) {
+	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
 		idx = srcu_read_lock(srcu_ctlp);
 		WARN_ON_ONCE(idx & ~0x1);
 		ret += idx;
 	}
-	if (reader_flavor & 0x2) {
+	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
 		idx = srcu_read_lock_nmisafe(srcu_ctlp);
 		WARN_ON_ONCE(idx & ~0x1);
 		ret += idx << 1;
 	}
-	if (reader_flavor & 0x4) {
+	if (reader_flavor & SRCU_READ_FLAVOR_LITE) {
 		idx = srcu_read_lock_lite(srcu_ctlp);
 		WARN_ON_ONCE(idx & ~0x1);
 		ret += idx << 2;
@@ -690,11 +719,11 @@ srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 static void srcu_torture_read_unlock(int idx)
 {
 	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
-	if (reader_flavor & 0x4)
+	if (reader_flavor & SRCU_READ_FLAVOR_LITE)
 		srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2);
-	if (reader_flavor & 0x2)
+	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
 		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
-	if ((reader_flavor & 0x1) || !(reader_flavor & 0x7))
+	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
 		srcu_read_unlock(srcu_ctlp, idx & 0x1);
 }
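Reviewer note: the multiplexed return value carries one bit per flavor (bit 0 from srcu_read_lock(), bit 1 from srcu_read_lock_nmisafe(), bit 2 from srcu_read_lock_lite()). A hedged sketch of the decode that the unlock path above performs (example_decode() is illustrative only):

	// Each srcu_read_lock*() variant returns 0 or 1; the lock path shifts
	// that bit into its flavor's slot and the unlock path extracts it.
	static void example_decode(int idx)
	{
		int normal_idx = idx & 0x1;		// SRCU_READ_FLAVOR_NORMAL slot.
		int nmi_idx = (idx & 0x2) >> 1;		// SRCU_READ_FLAVOR_NMI slot.
		int lite_idx = (idx & 0x4) >> 2;	// SRCU_READ_FLAVOR_LITE slot.

		pr_info("normal=%d nmi=%d lite=%d\n", normal_idx, nmi_idx, lite_idx);
	}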
@@ -857,7 +886,7 @@ static void synchronize_rcu_trivial(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
+		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
 	}
 }
@@ -1347,6 +1376,7 @@ static void rcu_torture_write_types(void)
 		pr_alert("%s: gp_sync without primitives.\n", __func__);
 	}
 	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
+	pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
 }
 
 /*
@@ -1513,7 +1543,8 @@ rcu_torture_writer(void *arg)
 		case RTWS_COND_GET:
 			rcu_torture_writer_state = RTWS_COND_GET;
 			gp_snap = cur_ops->get_gp_state();
-			torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+			torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
+					     1000, &rand);
 			rcu_torture_writer_state = RTWS_COND_SYNC;
 			cur_ops->cond_sync(gp_snap);
 			rcu_torture_pipe_update(old_rp);
@@ -1521,7 +1552,8 @@ rcu_torture_writer(void *arg)
 		case RTWS_COND_GET_EXP:
 			rcu_torture_writer_state = RTWS_COND_GET_EXP;
 			gp_snap = cur_ops->get_gp_state_exp();
-			torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+			torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
+					     1000, &rand);
 			rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
 			cur_ops->cond_sync_exp(gp_snap);
 			rcu_torture_pipe_update(old_rp);
@@ -1529,7 +1561,8 @@ rcu_torture_writer(void *arg)
 		case RTWS_COND_GET_FULL:
 			rcu_torture_writer_state = RTWS_COND_GET_FULL;
 			cur_ops->get_gp_state_full(&gp_snap_full);
-			torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+			torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
+					     1000, &rand);
 			rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
 			cur_ops->cond_sync_full(&gp_snap_full);
 			rcu_torture_pipe_update(old_rp);
@@ -1537,7 +1570,8 @@ rcu_torture_writer(void *arg)
 		case RTWS_COND_GET_EXP_FULL:
 			rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
 			cur_ops->get_gp_state_full(&gp_snap_full);
-			torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+			torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
+					     1000, &rand);
 			rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
 			cur_ops->cond_sync_exp_full(&gp_snap_full);
 			rcu_torture_pipe_update(old_rp);
@@ -1557,8 +1591,8 @@ rcu_torture_writer(void *arg)
 					break;
 				}
 				WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
-				torture_hrtimeout_jiffies(torture_random(&rand) % 16,
-							  &rand);
+				torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
+						     1000, &rand);
 			}
 			rcu_torture_pipe_update(old_rp);
 			break;
@@ -1578,8 +1612,8 @@ rcu_torture_writer(void *arg)
 					break;
 				}
 				WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
-				torture_hrtimeout_jiffies(torture_random(&rand) % 16,
-							  &rand);
+				torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
+						     1000, &rand);
 			}
 			rcu_torture_pipe_update(old_rp);
 			break;
@@ -1588,8 +1622,8 @@ rcu_torture_writer(void *arg)
 			gp_snap = cur_ops->start_gp_poll_exp();
 			rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
 			while (!cur_ops->poll_gp_state_exp(gp_snap))
-				torture_hrtimeout_jiffies(torture_random(&rand) % 16,
-							  &rand);
+				torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
+						     1000, &rand);
 			rcu_torture_pipe_update(old_rp);
 			break;
 		case RTWS_POLL_GET_EXP_FULL:
@@ -1597,8 +1631,8 @@ rcu_torture_writer(void *arg)
 			cur_ops->start_gp_poll_exp_full(&gp_snap_full);
 			rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
 			while (!cur_ops->poll_gp_state_full(&gp_snap_full))
-				torture_hrtimeout_jiffies(torture_random(&rand) % 16,
-							  &rand);
+				torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
+						     1000, &rand);
 			rcu_torture_pipe_update(old_rp);
 			break;
 		case RTWS_SYNC:
@@ -1835,6 +1869,44 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
 }
 
+// Verify the specified RCUTORTURE_RDR* state.
+#define ROEC_ARGS "%s %s: Current %#x  To add %#x  To remove %#x  preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count()
+static void rcutorture_one_extend_check(char *s, int curstate, int new, int old, bool insoftirq)
+{
+	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE))
+		return;
+
+	WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled(), ROEC_ARGS);
+	WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS);
+
+	// If CONFIG_PREEMPT_COUNT=n, further checks are unreliable.
+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return;
+
+	WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+		  !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+	WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+		  !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->readlock_nesting &&
+		  (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+		  cur_ops->readlock_nesting() == 0, ROEC_ARGS);
+
+	// Timer handlers have all sorts of stuff disabled, so ignore
+	// unintended disabling.
+	if (insoftirq)
+		return;
+
+	WARN_ONCE(cur_ops->extendables &&
+		  !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+		  (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->extendables &&
+		  !(curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+		  (preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+	WARN_ONCE(cur_ops->readlock_nesting &&
+		  !(curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+		  cur_ops->readlock_nesting() > 0, ROEC_ARGS);
+}
+
 /*
  * Do one extension of an RCU read-side critical section using the
  * current reader state in readstate (set to zero for initial entry
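Reviewer note: ROEC_ARGS expands to a complete format string plus argument list, so each WARN_ONCE() in the new checker stays a single readable condition. A minimal standalone sketch of the same idiom (names are illustrative, not from this patch):

	// The macro supplies both format and arguments; `state` must be in
	// scope at each expansion site, just as curstate/new/old are above.
	#define EX_ARGS "%s: state %#x preempt_count() %#x\n", __func__, state, preempt_count()

	static void example_check(int state)
	{
		WARN_ONCE(!(state & 0x1) && irqs_disabled(), EX_ARGS);
		WARN_ONCE((state & 0x1) && !irqs_disabled(), EX_ARGS);
	}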
@@ -1844,10 +1916,11 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
  * beginning or end of the critical section and if there was actually a
  * change, do a ->read_delay().
  */
-static void rcutorture_one_extend(int *readstate, int newstate,
+static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
 				  struct torture_random_state *trsp,
 				  struct rt_read_seg *rtrsp)
 {
+	bool first;
 	unsigned long flags;
 	int idxnew1 = -1;
 	int idxnew2 = -1;
@@ -1856,8 +1929,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 	int statesnew = ~*readstate & newstate;
 	int statesold = *readstate & ~newstate;
 
+	first = idxold1 == 0;
 	WARN_ON_ONCE(idxold2 < 0);
 	WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
+	rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq);
 	rtrsp->rt_readstate = newstate;
 
 	/* First, put new protection in place to avoid critical-section gap. */
@@ -1876,6 +1951,21 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 	if (statesnew & RCUTORTURE_RDR_RCU_2)
 		idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
 
+	// Complain unless both the old and the new protection is in place.
+	rcutorture_one_extend_check("during change",
+				    idxold1 | statesnew, statesnew, statesold, insoftirq);
+
+	// Sample CPU under both sets of protections to reduce confusion.
+	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
+		int cpu = raw_smp_processor_id();
+		rtrsp->rt_cpu = cpu;
+		if (!first) {
+			rtrsp[-1].rt_end_cpu = cpu;
+			if (cur_ops->reader_blocked)
+				rtrsp[-1].rt_preempted = cur_ops->reader_blocked();
+		}
+	}
+
 	/*
 	 * Next, remove old protection, in decreasing order of strength
 	 * to avoid unlock paths that aren't safe in the stronger
@@ -1926,6 +2016,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 	WARN_ON_ONCE(*readstate < 0);
 	if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
 		pr_info("Unexpected readstate value of %#x\n", *readstate);
+	rcutorture_one_extend_check("after change", *readstate, statesnew, statesold, insoftirq);
 }
 
 /* Return the biggest extendables mask given current RCU and boot parameters. */
@@ -1992,7 +2083,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
  * critical section.
  */
 static struct rt_read_seg *
-rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
+rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_state *trsp,
 		       struct rt_read_seg *rtrsp)
 {
 	int i;
@@ -2007,7 +2098,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
 	for (j = 0; j < i; j++) {
 		mask = rcutorture_extend_mask(*readstate, trsp);
-		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
+		rcutorture_one_extend(readstate, mask, insoftirq, trsp, &rtrsp[j]);
 	}
 	return &rtrsp[j];
 }
@@ -2028,6 +2119,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 	int newstate;
 	struct rcu_torture *p;
 	int pipe_count;
+	bool preempted = false;
 	int readstate = 0;
 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
 	struct rt_read_seg *rtrsp = &rtseg[0];
@@ -2036,7 +2128,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 
 	WARN_ON_ONCE(!rcu_is_watching());
 	newstate = rcutorture_extend_mask(readstate, trsp);
-	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
+	rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++);
 	if (checkpolling) {
 		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
 			cookie = cur_ops->get_gp_state();
@@ -2049,13 +2141,13 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 		  !cur_ops->readlock_held || cur_ops->readlock_held());
 	if (p == NULL) {
 		/* Wait for rcu_torture_writer to get underway */
-		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
+		rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
 		return false;
 	}
 	if (p->rtort_mbtest == 0)
 		atomic_inc(&n_rcu_torture_mberror);
 	rcu_torture_reader_do_mbchk(myid, p, trsp);
-	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
+	rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp);
 	preempt_disable();
 	pipe_count = READ_ONCE(p->rtort_pipe_count);
 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -2093,7 +2185,9 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 			  rcu_torture_writer_state,
 			  cpumask_pr_args(cpu_online_mask));
 	}
-	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
+	if (cur_ops->reader_blocked)
+		preempted = cur_ops->reader_blocked();
+	rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
 	WARN_ON_ONCE(readstate);
 	// This next splat is expected behavior if leakpointer, especially
 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
@@ -2105,6 +2199,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
 			err_segs[i++] = *rtrsp1;
 		rt_read_nsegs = i;
+		rt_read_preempted = preempted;
 	}
 
 	return true;
@@ -2425,7 +2520,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 		 "read_exit_delay=%d read_exit_burst=%d "
 		 "reader_flavor=%x "
 		 "nocbs_nthreads=%d nocbs_toggle=%d "
-		 "test_nmis=%d\n",
+		 "test_nmis=%d "
+		 "preempt_duration=%d preempt_interval=%d\n",
 		 torture_type, tag, nrealreaders, nfakewriters,
 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
@@ -2438,7 +2534,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 		 read_exit_delay, read_exit_burst,
 		 reader_flavor,
 		 nocbs_nthreads, nocbs_toggle,
-		 test_nmis);
+		 test_nmis,
+		 preempt_duration, preempt_interval);
 }
 
 static int rcutorture_booster_cleanup(unsigned int cpu)
@@ -3068,12 +3165,12 @@ static int __init rcu_torture_fwd_prog_init(void)
 		fwd_progress = 0;
 		return 0;
 	}
-	if (stall_cpu > 0) {
-		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
+	if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
+		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing");
 		fwd_progress = 0;
 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
 			return -EINVAL; /* In module, can fail back to user. */
-		WARN_ON(1); /* Make sure rcutorture notices conflict. */
+		WARN_ON(1); /* Make sure rcutorture scripting notices conflict. */
 		return 0;
 	}
 	if (fwd_progress_holdoff <= 0)
@@ -3418,6 +3515,35 @@ static void rcutorture_test_nmis(int n)
 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
 }
 
+// Randomly preempt online CPUs.
+static int rcu_torture_preempt(void *unused)
+{
+	int cpu = -1;
+	DEFINE_TORTURE_RANDOM(rand);
+
+	schedule_timeout_idle(stall_cpu_holdoff);
+	do {
+		// Wait for preempt_interval ms with up to 100us fuzz.
+		torture_hrtimeout_ms(preempt_interval, 100, &rand);
+		// Select online CPU.
+		cpu = cpumask_next(cpu, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			cpu = cpumask_next(-1, cpu_online_mask);
+		WARN_ON_ONCE(cpu >= nr_cpu_ids);
+		// Move to that CPU, if can't do so, retry later.
+		if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
+			continue;
+		// Preempt at high-ish priority, then reset to normal.
+		sched_set_fifo(current);
+		torture_sched_setaffinity(current->pid, cpu_present_mask, true);
+		mdelay(preempt_duration);
+		sched_set_normal(current, 0);
+		stutter_wait("rcu_torture_preempt");
+	} while (!torture_must_stop());
+	torture_kthread_stopping("rcu_torture_preempt");
+	return 0;
+}
+
 static enum cpuhp_state rcutor_hp;
 
 static void
@@ -3446,6 +3572,7 @@ rcu_torture_cleanup(void)
 
 	if (cur_ops->gp_kthread_dbg)
 		cur_ops->gp_kthread_dbg();
+	torture_stop_kthread(rcu_torture_preempt, preempt_task);
 	rcu_torture_read_exit_cleanup();
 	rcu_torture_barrier_cleanup();
 	rcu_torture_fwd_prog_cleanup();
@@ -3508,26 +3635,49 @@ rcu_torture_cleanup(void)
 			pr_alert("\t: No segments recorded!!!\n");
 		firsttime = 1;
 		for (i = 0; i < rt_read_nsegs; i++) {
-			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
+			pr_alert("\t%d: %#4x", i, err_segs[i].rt_readstate);
 			if (err_segs[i].rt_delay_jiffies != 0) {
 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
 					err_segs[i].rt_delay_jiffies);
 				firsttime = 0;
 			}
+			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
+				pr_cont(" CPU %2d", err_segs[i].rt_cpu);
+				if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu)
+					pr_cont("->%-2d", err_segs[i].rt_end_cpu);
+				else
+					pr_cont(" ...");
+			}
 			if (err_segs[i].rt_delay_ms != 0) {
-				pr_cont("%s%ldms", firsttime ? "" : "+",
+				pr_cont(" %s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
 				firsttime = 0;
 			}
 			if (err_segs[i].rt_delay_us != 0) {
-				pr_cont("%s%ldus", firsttime ? "" : "+",
+				pr_cont(" %s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
 				firsttime = 0;
 			}
-			pr_cont("%s\n",
-				err_segs[i].rt_preempted ? "preempted" : "");
+			pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : "");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH)
+				pr_cont(" BH");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ)
+				pr_cont(" IRQ");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT)
+				pr_cont(" PREEMPT");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH)
+				pr_cont(" RBH");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED)
+				pr_cont(" SCHED");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1)
+				pr_cont(" RCU_1");
+			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2)
+				pr_cont(" RCU_2");
+			pr_cont("\n");
+
 		}
+		if (rt_read_preempted)
+			pr_alert("\tReader was preempted.\n");
 	}
 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
@@ -4019,6 +4169,11 @@ rcu_torture_init(void)
 	firsterr = rcu_torture_read_exit_init();
 	if (torture_init_error(firsterr))
 		goto unwind;
+	if (preempt_duration > 0) {
+		firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task);
+		if (torture_init_error(firsterr))
+			goto unwind;
+	}
 	if (object_debug)
 		rcu_test_debug_objects();
 	torture_init_end();
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/torture.h>
 #include <linux/types.h>
+#include <linux/sched/clock.h>

 #include "rcu.h"

@@ -531,6 +532,39 @@ static const struct ref_scale_ops acqrel_ops = {

 static volatile u64 stopopts;

+static void ref_sched_clock_section(const int nloops)
+{
+	u64 x = 0;
+	int i;
+
+	preempt_disable();
+	for (i = nloops; i >= 0; i--)
+		x += sched_clock();
+	preempt_enable();
+	stopopts = x;
+}
+
+static void ref_sched_clock_delay_section(const int nloops, const int udl, const int ndl)
+{
+	u64 x = 0;
+	int i;
+
+	preempt_disable();
+	for (i = nloops; i >= 0; i--) {
+		x += sched_clock();
+		un_delay(udl, ndl);
+	}
+	preempt_enable();
+	stopopts = x;
+}
+
+static const struct ref_scale_ops sched_clock_ops = {
+	.readsection = ref_sched_clock_section,
+	.delaysection = ref_sched_clock_delay_section,
+	.name = "sched-clock"
+};
+
 static void ref_clock_section(const int nloops)
 {
 	u64 x = 0;
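
Two details of the measurement loops above are worth noting: the accumulated sched_clock() values land in the volatile stopopts, so the compiler cannot optimize the timing calls away, and the preempt_disable()/preempt_enable() bracketing keeps each pass on a single CPU. Like the other ref_scale_ops instances, the new ops are selected by their .name string; assuming the usual refscale module-parameter conventions, something like the following kernel command-line fragment would exercise them (the loops parameter here is an assumption for illustration):

	refscale.scale_type=sched-clock refscale.loops=10000
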
@@ -1130,9 +1164,9 @@ ref_scale_init(void)
 	int firsterr = 0;
 	static const struct ref_scale_ops *scale_ops[] = {
 		&rcu_ops, &srcu_ops, &srcu_lite_ops, RCU_TRACE_OPS RCU_TASKS_OPS
-		&refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
-		&clock_ops, &jiffies_ops, &typesafe_ref_ops, &typesafe_lock_ops,
-		&typesafe_seqlock_ops,
+		&refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops,
+		&acqrel_ops, &sched_clock_ops, &clock_ops, &jiffies_ops,
+		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
 	};

 	if (!torture_init_begin(scale_type, verbose))

@@ -738,7 +738,8 @@ EXPORT_SYMBOL_GPL(__srcu_check_read_flavor);
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * Returns a guaranteed non-negative index that must be passed to the
+ * matching __srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *ssp)
 {
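
A hedged usage sketch of the hand-off that the revised __srcu_read_lock() comment describes; my_srcu and the reader function are illustrative, not part of this patch:

	DEFINE_SRCU(my_srcu);

	static void example_srcu_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);		/* Guaranteed non-negative. */
		/* ...SRCU read-side critical section... */
		srcu_read_unlock(&my_srcu, idx);	/* Must pass the same index back. */
	}

The non-negativity guarantee lets callers stash the index in a signed int and reserve negative values as a "not currently locked" sentinel.
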
@@ -1076,7 +1077,6 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
 	/* If grace period not already in progress, start it. */
 	if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
 	    rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
-		WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
 		srcu_gp_start(ssp);

 		// And how can that list_add() in the "else" clause

@@ -3084,8 +3084,11 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	head->func = func;
 	head->next = NULL;
 	kasan_record_aux_stack(head);
+
 	local_irq_save(flags);
 	rdp = this_cpu_ptr(&rcu_data);
+	RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
+
 	lazy = lazy_in && !rcu_async_should_hurry();

 	/* Add the callback to our list. */

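RCU_LOCKDEP_WARN() compiles away unless CONFIG_PROVE_RCU is enabled, so the new offline-CPU assertion in __call_rcu_common() should cost production builds nothing; under lockdep it flags the bug of enqueueing a callback from a CPU that RCU considers offline.
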
@@ -227,16 +227,16 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)

 /*
  * Report expedited quiescent state for multiple CPUs, all covered by the
- * specified leaf rcu_node structure.
+ * specified leaf rcu_node structure, which is acquired by the caller.
  */
-static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
+static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, unsigned long flags,
 				    unsigned long mask, bool wake)
+	__releases(rnp->lock)
 {
 	int cpu;
-	unsigned long flags;
 	struct rcu_data *rdp;

-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	raw_lockdep_assert_held_rcu_node(rnp);
 	if (!(rnp->expmask & mask)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
@@ -257,8 +257,13 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
  */
 static void rcu_report_exp_rdp(struct rcu_data *rdp)
 {
+	unsigned long flags;
+	struct rcu_node *rnp = rdp->mynode;
+
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
-	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
+	ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp);
+	rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true);
 }

 /* Common code for work-done checking. */
@@ -432,8 +437,10 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 	/* Report quiescent states for those that went offline. */
-	if (mask_ofl_test)
-		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
+	if (mask_ofl_test) {
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+		rcu_report_exp_cpu_mult(rnp, flags, mask_ofl_test, false);
+	}
 }

 static void rcu_exp_sel_wait_wake(unsigned long s);
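
The three hunks above move the rcu_node lock acquisition out of rcu_report_exp_cpu_mult() and into its callers, which lets rcu_report_exp_rdp() update ->cpu_no_qs.b.exp and report the quiescent state within a single critical section. A minimal sketch of the convention with hypothetical function names (the primitives are the ones the patch itself uses):

	/* Callee: entered with rnp->lock held, releases it on every path. */
	static void example_report_locked(struct rcu_node *rnp, unsigned long flags,
					  unsigned long mask)
		__releases(rnp->lock)
	{
		raw_lockdep_assert_held_rcu_node(rnp);	/* Caller did the locking. */
		if (!(rnp->expmask & mask)) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;				/* Early exit still unlocks. */
		}
		/* ...report quiescent states under the lock... */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	/* Caller: acquires the lock, then hands both lock and flags over. */
	static void example_caller(struct rcu_node *rnp, unsigned long mask)
	{
		unsigned long flags;

		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		example_report_locked(rnp, flags, mask);	/* Releases rnp->lock. */
	}

The sparse __releases() annotation documents the hand-off so static analysis can check that every path out of the callee drops the lock.
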
@@ -712,6 +719,18 @@ static void rcu_exp_sel_wait_wake(unsigned long s)
 	rcu_exp_wait_wake(s);
 }

+/* Request an expedited quiescent state. */
+static void rcu_exp_need_qs(void)
+{
+	lockdep_assert_irqs_disabled();
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp));
+	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
+	/* Store .exp before .rcu_urgent_qs. */
+	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
+	set_tsk_need_resched(current);
+	set_preempt_need_resched();
+}
+
 #ifdef CONFIG_PREEMPT_RCU

 /*
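
The smp_store_release() orders the .exp store before the .rcu_urgent_qs store, as the comment says. An illustrative acquire-side pairing, not taken from tree.c (the function name and the use of smp_load_acquire() are assumptions for this sketch only):

	/*
	 * Illustrative reader: if the acquire load observes .rcu_urgent_qs
	 * as true, it is guaranteed to also observe the earlier store that
	 * set .cpu_no_qs.b.exp.
	 */
	static bool example_exp_qs_requested(void)
	{
		if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
			return false;
		return __this_cpu_read(rcu_data.cpu_no_qs.b.exp);
	}

The lockdep_assert_irqs_disabled() and ASSERT_EXCLUSIVE_WRITER_SCOPED() additions are debug-only: the first documents the required calling context, and the second lets KCSAN verify that no other CPU concurrently writes this CPU's .cpu_no_qs.b.exp.
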
@@ -730,24 +749,34 @@ static void rcu_exp_handler(void *unused)
 	struct task_struct *t = current;

 	/*
-	 * First, the common case of not being in an RCU read-side
+	 * First, is there no need for a quiescent state from this CPU,
+	 * or is this CPU already looking for a quiescent state for the
+	 * current grace period? If either is the case, just leave.
+	 * However, this should not happen due to the preemptible
+	 * sync_sched_exp_online_cleanup() implementation being a no-op,
+	 * so warn if this does happen.
+	 */
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
+	if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+			 READ_ONCE(rdp->cpu_no_qs.b.exp)))
+		return;
+
+	/*
+	 * Second, the common case of not being in an RCU read-side
 	 * critical section. If also enabled or idle, immediately
 	 * report the quiescent state, otherwise defer.
 	 */
 	if (!depth) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-		    rcu_is_cpu_rrupt_from_idle()) {
+		    rcu_is_cpu_rrupt_from_idle())
 			rcu_report_exp_rdp(rdp);
-		} else {
-			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
-			set_tsk_need_resched(t);
-			set_preempt_need_resched();
-		}
+		else
+			rcu_exp_need_qs();
 		return;
 	}

 	/*
-	 * Second, the less-common case of being in an RCU read-side
+	 * Third, the less-common case of being in an RCU read-side
 	 * critical section. In this case we can count on a future
 	 * rcu_read_unlock(). However, this rcu_read_unlock() might
 	 * execute on some other CPU, but in that case there will be
@@ -768,7 +797,7 @@ static void rcu_exp_handler(void *unused)
 		return;
 	}

-	// Finally, negative nesting depth should not happen.
+	// Fourth and finally, negative nesting depth should not happen.
 	WARN_ON_ONCE(1);
 }

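With these two hunks, the preemptible-RCU IPI handler reads as four ordered cases: (1) no quiescent state is needed or one is already pending, so warn and leave; (2) not in a read-side critical section, so report immediately or request one via the now-shared rcu_exp_need_qs(); (3) in a read-side critical section, so defer to the eventual rcu_read_unlock(); and (4) negative nesting depth, which should never happen. The next hunk removes the !CONFIG_PREEMPT_RCU copy of rcu_exp_need_qs() that this consolidation made redundant.
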
@@ -835,16 +864,6 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)

 #else /* #ifdef CONFIG_PREEMPT_RCU */

-/* Request an expedited quiescent state. */
-static void rcu_exp_need_qs(void)
-{
-	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
-	/* Store .exp before .rcu_urgent_qs. */
-	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
-	set_tsk_need_resched(current);
-	set_preempt_need_resched();
-}
-
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
 static void rcu_exp_handler(void *unused)
 {
@@ -852,6 +871,7 @@ static void rcu_exp_handler(void *unused)
 	struct rcu_node *rnp = rdp->mynode;
 	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;

@@ -275,6 +275,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rcu_report_exp_rdp(rdp);
 	else
 		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
 }

 /*

@@ -527,12 +527,12 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);

 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
 /* Get rcutorture access to sched_setaffinity(). */
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn)
 {
 	int ret;

 	ret = sched_setaffinity(pid, in_mask);
-	WARN_ONCE(ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
+	WARN_ONCE(dowarn && ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);

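A hedged usage sketch of the new dowarn flag; the function and its surroundings are an illustrative caller, not part of this patch:

	/* Try to bind the current task to a CPU, quietly tolerating failure. */
	static int example_bind_to_cpu(int cpu)
	{
		int ret;

		ret = torture_sched_setaffinity(task_pid_nr(current),
						cpumask_of(cpu), false);
		if (ret)
			pr_info("CPU %d affinity unavailable (%d), skipping.\n",
				cpu, ret);
		return ret;
	}

Passing true preserves the old behavior of a WARN_ONCE() splat on any failure.
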
@@ -181,10 +181,11 @@ done

 # Function to check for presence of a file on the specified system.
 # Complain if the system cannot be reached, and retry after a wait.
-# Currently just waits forever if a machine disappears.
+# Currently just waits 15 minutes if a machine disappears.
 #
 # Usage: checkremotefile system pathname
 checkremotefile () {
+	local nsshfails=0
 	local ret
 	local sleeptime=60

@@ -195,6 +196,11 @@ checkremotefile () {
 		if test "$ret" -eq 255
 		then
 			echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date` | tee -a "$oldrun/remote-log"
+			nsshfails=$((nsshfails+1))
+			if ((nsshfails > 15))
+			then
+				return 255
+			fi
 		elif test "$ret" -eq 0
 		then
 			return 0
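
With sleeptime=60, capping the count at 15 consecutive ssh failures bounds the wait at roughly 15 minutes, matching the revised header comment; the new return status of 255 lets the caller distinguish a persistently unreachable system from an ordinary answer about the file's presence.
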
@@ -268,12 +274,23 @@ echo All batches started. `date` | tee -a "$oldrun/remote-log"
 for i in $systems
 do
 	echo " ---" Waiting for $i `date` | tee -a "$oldrun/remote-log"
-	while checkremotefile "$i" "$resdir/$ds/remote.run"
+	while :
 	do
+		checkremotefile "$i" "$resdir/$ds/remote.run"
+		ret=$?
+		if test "$ret" -eq 1
+		then
+			echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log"
+			( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+			break;
+		fi
+		if test "$ret" -eq 255
+		then
+			echo System $i persistent ssh failure, lost results `date` | tee -a "$oldrun/remote-log"
+			break;
+		fi
 		sleep 30
 	done
-	echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log"
-	( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
 done

 ( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"
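
The collection loop now acts on checkremotefile's exit status explicitly: 1 (the remote.run marker is gone) means the remote run finished, so the results are pulled back with the ssh/tar pipeline and the loop exits, while the new 255 means ssh never recovered and the results are declared lost rather than waiting forever. The tar-over-ssh collection command itself is unchanged, merely relocated into the success branch.
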

@@ -5,3 +5,4 @@ rcutree.gp_cleanup_delay=3
 rcutree.kthread_prio=2
 threadirqs
 rcutree.use_softirq=0
+rcutorture.preempt_duration=10
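
This boot-parameter addition enables the preemption kthread created by the rcu_torture_init() hunk earlier in this merge: any nonzero rcutorture.preempt_duration starts rcu_torture_preempt().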