torture: Add dowarn argument to torture_sched_setaffinity()
Current use cases of torture_sched_setaffinity() are well served by its unconditional warning on error. However, an upcoming use case for a preemption kthread needs to avoid warnings that might otherwise arise when that kthread attempts to bind itself to a CPU on its way offline. This commit therefore adds a dowarn argument that, when false, suppresses the warning.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
parent fac04efc5c
commit 0203b485d2
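The new dowarn flag simply gates the existing WARN_ONCE(): every caller touched by this commit passes true, so current behavior is unchanged. A minimal sketch of the kind of caller the commit message anticipates, with a hypothetical helper name and message that are not part of this commit: a kthread that may race with CPU hotplug passes false and handles the failure itself.

/*
 * Illustrative only -- not from this commit.  sched_setaffinity() can
 * legitimately fail if the target CPU goes offline, so suppress the
 * torture-side warning and just note the failure.
 */
static void torture_bind_current_quietly(int cpu)
{
	if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
		pr_info("CPU %d went offline before binding completed\n", cpu);
}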
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -130,7 +130,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
 #endif
 
 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 #endif
 
 #endif /* __LINUX_TORTURE_H */
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -106,7 +106,7 @@ static const struct kernel_param_ops lt_bind_ops = {
 module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
 module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
 
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
 
 static struct task_struct *stats_task;
 static struct task_struct **writer_tasks;
@@ -1358,7 +1358,7 @@ static int __init lock_torture_init(void)
 		if (torture_init_error(firsterr))
 			goto unwind;
 		if (cpumask_nonempty(bind_writers))
-			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
+			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers, true);
 
 	create_reader:
 		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
@@ -1369,7 +1369,7 @@ static int __init lock_torture_init(void)
 		if (torture_init_error(firsterr))
 			goto unwind;
 		if (cpumask_nonempty(bind_readers))
-			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
+			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers, true);
 	}
 	if (stat_interval > 0) {
 		firsterr = torture_create_kthread(lock_torture_stats, NULL,
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -857,7 +857,7 @@ static void synchronize_rcu_trivial(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
+		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
 	}
 }
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -527,12 +527,12 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 
 #if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
 /* Get rcutorture access to sched_setaffinity(). */
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn)
 {
 	int ret;
 
 	ret = sched_setaffinity(pid, in_mask);
-	WARN_ONCE(ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
+	WARN_ONCE(dowarn && ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);