mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-16 21:35:07 +00:00
introduce __next_thread(), fix next_tid() vs exec() race
Patch series "introduce __next_thread(), change next_thread()". After commit dce8f8ed1de1 ("document while_each_thread(), change first_tid() to use for_each_thread()") + this series 1. We have only one lockless user of next_thread(), task_group_seq_get_next(). I think it should be changed too. 2. We have only one user of task_struct->thread_group, thread_group_empty(). The next patches will change thread_group_empty() and kill ->thread_group. This patch (of 2): next_tid(start) does: rcu_read_lock(); if (pid_alive(start)) { pos = next_thread(start); if (thread_group_leader(pos)) pos = NULL; else get_task_struct(pos); it should return pos = NULL when next_thread() wraps to the 1st thread in the thread group, the group leader, and the thread_group_leader() check tries to detect this case. But this can race with exec. To simplify, suppose we have a main thread M and a single sub-thread T; next_tid(T) should return NULL. Now suppose that T execs. If next_tid(T) is called after T takes over the leadership and before it does release_task(), which removes the old leader from the list, then next_thread() returns M and thread_group_leader(M) is false. Lockless use of next_thread() should be avoided. After this change only task_group_seq_get_next() does this, and I believe it should be changed as well. Link: https://lkml.kernel.org/r/20230824143112.GA31208@redhat.com Link: https://lkml.kernel.org/r/20230824143142.GA31222@redhat.com Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
00adf323b2
commit
33a9813825
@@ -3840,10 +3840,8 @@ static struct task_struct *next_tid(struct task_struct *start)
|
||||
struct task_struct *pos = NULL;
|
||||
rcu_read_lock();
|
||||
if (pid_alive(start)) {
|
||||
pos = next_thread(start);
|
||||
if (thread_group_leader(pos))
|
||||
pos = NULL;
|
||||
else
|
||||
pos = __next_thread(start);
|
||||
if (pos)
|
||||
get_task_struct(pos);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
@@ -715,6 +715,17 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
|
||||
return p1->signal == p2->signal;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns NULL if p is the last thread in the thread group
|
||||
*/
|
||||
static inline struct task_struct *__next_thread(struct task_struct *p)
|
||||
{
|
||||
return list_next_or_null_rcu(&p->signal->thread_head,
|
||||
&p->thread_node,
|
||||
struct task_struct,
|
||||
thread_node);
|
||||
}
|
||||
|
||||
static inline struct task_struct *next_thread(const struct task_struct *p)
|
||||
{
|
||||
return list_entry_rcu(p->thread_group.next,
|
||||
|
Loading…
x
Reference in New Issue
Block a user