Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-01 10:42:11 +00:00)
Revert "rcu-tasks: Fix synchronize_rcu_tasks() VS zap_pid_ns_processes()"
This reverts commit 28319d6dc5 ("rcu-tasks: Fix synchronize_rcu_tasks() VS
zap_pid_ns_processes()").

The race it fixed was subject to conditions that don't exist anymore since
1612160b91 ("rcu-tasks: Eliminate deadlocks involving do_exit() and RCU
tasks"). This latter commit removed the use of SRCU that used to cover the
RCU-tasks blind spot on exit, between the task's removal from the tasklist and
the final preemption disabling. The task is now placed instead on a temporary
list, and while it is there its voluntary sleeps are accounted as RCU-tasks
quiescent states. This disarms the deadlock initially reported against PID
namespace exit.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 1613e604df
commit 9855c37edf
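The list-based scheme the message describes can be pictured with a small userspace model. This is only a sketch under assumed simplifications (a mutex-protected singly linked list, a plain counter standing in for quiescent-state detection, pthreads instead of tasks); the fake_task type and the model_*() helpers are invented for the illustration and are not the kernel's implementation.

/* model_exit_list.c - hypothetical illustration only, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_task {
	struct fake_task *next;		/* link on the "exiting tasks" list */
	unsigned long voluntary_qs;	/* bumped on every voluntary sleep */
	bool exiting;			/* still on the exit list? */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static struct fake_task *exit_list;

/* Rough analogue of exit_tasks_rcu_start(): park the exiting task on a list. */
static void model_exit_start(struct fake_task *t)
{
	pthread_mutex_lock(&lock);
	t->exiting = true;
	t->next = exit_list;
	exit_list = t;
	pthread_mutex_unlock(&lock);
}

/* A voluntary sleep: record a quiescent state, then yield the CPU. */
static void model_voluntary_sleep(struct fake_task *t)
{
	pthread_mutex_lock(&lock);
	t->voluntary_qs++;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	usleep(1000);
}

/* Rough analogue of exit_tasks_rcu_finish(): drop off the list for good. */
static void model_exit_finish(struct fake_task *t)
{
	pthread_mutex_lock(&lock);
	for (struct fake_task **p = &exit_list; *p; p = &(*p)->next) {
		if (*p == t) {
			*p = t->next;
			break;
		}
	}
	t->exiting = false;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/*
 * Rough analogue of the grace-period side: a listed task is a holdout only
 * until it leaves the list or records one more voluntary sleep, so the waiter
 * never depends on the task completing its exit path.
 */
static void model_wait_for(struct fake_task *t)
{
	pthread_mutex_lock(&lock);
	unsigned long snap = t->voluntary_qs;
	while (t->exiting && t->voluntary_qs == snap)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void *exiting_thread(void *arg)
{
	struct fake_task *t = arg;

	model_exit_start(t);
	for (int i = 0; i < 5; i++)
		model_voluntary_sleep(t);	/* like the schedule() loop in zap_pid_ns_processes() */
	model_exit_finish(t);
	return NULL;
}

int main(void)
{
	struct fake_task t = { 0 };
	pthread_t tid;

	pthread_create(&tid, NULL, exiting_thread, &t);
	usleep(2000);		/* demo only: let the thread reach its "exit path" */
	model_wait_for(&t);	/* released by a quiescent state, not by the exit finishing */
	printf("waiter released without waiting for the exit to complete\n");
	pthread_join(tid, NULL);
	return 0;
}

Compile with "cc -pthread model_exit_list.c". The point mirrors the revert: once a voluntary sleep while on the list counts as a quiescent state, the exit_tasks_rcu_stop()/exit_tasks_rcu_start() dance around schedule() removed below has nothing left to work around.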
include/linux/rcupdate.h
@@ -209,7 +209,6 @@ void synchronize_rcu_tasks_rude(void);
 #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
-void exit_tasks_rcu_stop(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 #define rcu_tasks_classic_qs(t, preempt) do { } while (0)
@@ -218,7 +217,6 @@ void exit_tasks_rcu_finish(void);
 #define call_rcu_tasks call_rcu
 #define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
-static inline void exit_tasks_rcu_stop(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
kernel/pid_namespace.c
@@ -248,24 +248,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (pid_ns->pid_allocated == init_pids)
 			break;
-		/*
-		 * Release tasks_rcu_exit_srcu to avoid following deadlock:
-		 *
-		 * 1) TASK A unshare(CLONE_NEWPID)
-		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
-		 *    and TASK C
-		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
-		 * 4) TASK A calls synchronize_rcu_tasks()
-		 *                  -> synchronize_srcu(tasks_rcu_exit_srcu)
-		 * 5) *DEADLOCK*
-		 *
-		 * It is considered safe to release tasks_rcu_exit_srcu here
-		 * because we assume the current task can not be concurrently
-		 * reaped at this point.
-		 */
-		exit_tasks_rcu_stop();
 		schedule();
-		exit_tasks_rcu_start();
 	}
 	__set_current_state(TASK_RUNNING);
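For reference, the scenario spelled out in the comment removed just above can be staged from userspace up to step 3. Step 4, synchronize_rcu_tasks(), only runs inside the kernel (for example during tracing teardown), so this sketch cannot reproduce the old deadlock by itself; it is an illustrative assumption-labeled program (the TASK A/B/C names follow the removed comment, and it needs CAP_SYS_ADMIN for unshare(CLONE_NEWPID)), not a test from the kernel tree.

/* pidns_scenario.c - illustrative sketch only; requires CAP_SYS_ADMIN. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* 1) TASK A: children forked from now on land in a fresh PID namespace. */
	if (unshare(CLONE_NEWPID) != 0) {
		perror("unshare(CLONE_NEWPID)");
		return 1;
	}

	/* 2) First child becomes PID 1 of the new namespace, "TASK B", its child reaper. */
	pid_t b = fork();
	if (b == 0) {
		sleep(1);	/* let TASK C get going */
		_exit(0);	/* 3) TASK B exits: the kernel tears the namespace down... */
	}

	/* 2) Second child, "TASK C", an ordinary member of the same namespace. */
	pid_t c = fork();
	if (c == 0) {
		pause();	/* ...so TASK C is killed here with SIGKILL */
		_exit(0);
	}

	/*
	 * 3) While exiting, TASK B sits in zap_pid_ns_processes() until every PID in
	 *    the namespace is gone, which requires TASK A to reap TASK C below.
	 * 4) Under the old scheme, a TASK A stuck in synchronize_rcu_tasks()
	 *    -> synchronize_srcu(tasks_rcu_exit_srcu) could therefore deadlock with
	 *    TASK B; with the exit list, TASK B's voluntary schedule() is itself a
	 *    quiescent state, so nothing has to wait for TASK B's exit to complete.
	 */
	waitpid(c, NULL, 0);	/* reap TASK C; TASK B's exit can now complete */
	waitpid(b, NULL, 0);	/* reap TASK B */
	return 0;
}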
kernel/rcu/tasks.h
@@ -858,7 +858,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 //	not know to synchronize with this RCU Tasks grace period) have
 //	completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
 //	will take care of any tasks stuck in the non-preemptible region
-//	of do_exit() following its call to exit_tasks_rcu_stop().
+//	of do_exit() following its call to exit_tasks_rcu_finish().
 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
 //	Scans the holdout list, attempting to identify a quiescent state
 //	for each task on the list. If there is a quiescent state, the
@@ -1220,7 +1220,7 @@ void exit_tasks_rcu_start(void)
  * Remove the task from the "yet another list" because do_exit() is now
  * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
  */
-void exit_tasks_rcu_stop(void)
+void exit_tasks_rcu_finish(void)
 {
 	unsigned long flags;
 	struct rcu_tasks_percpu *rtpcp;
@@ -1231,22 +1231,12 @@ void exit_tasks_rcu_stop(void)
 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 	list_del_init(&t->rcu_tasks_exit_list);
 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-}
-
-/*
- * Contribute to protect against tasklist scan blind spot while the
- * task is exiting and may be removed from the tasklist. See
- * corresponding synchronize_srcu() for further details.
- */
-void exit_tasks_rcu_finish(void)
-{
-	exit_tasks_rcu_stop();
-	exit_tasks_rcu_finish_trace(current);
+	exit_tasks_rcu_finish_trace(t);
 }
 
 #else /* #ifdef CONFIG_TASKS_RCU */
 void exit_tasks_rcu_start(void) { }
-void exit_tasks_rcu_stop(void) { }
 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */