sched, cgroup: reorganize threadgroup locking
threadgroup_change_begin/end() are used to mark the beginning and end of threadgroup-modifying operations so that code paths which require a threadgroup to stay stable across blocking operations can synchronize against those sections using threadgroup_lock/unlock().

It's currently implemented as a general mechanism in sched.h using a per-signal_struct rwsem; however, it never grew non-cgroup use cases and becomes a noop if !CONFIG_CGROUPS. It turns out that cgroups is going to be better served by a different synchronization scheme, and it is a bit silly to keep cgroup-specific details in a general mechanism.

What is general here is identifying the places where threadgroups are modified. This patch restructures threadgroup locking so that threadgroup_change_begin/end() become hook points into which subsystems that need to synchronize against threadgroup changes can plug. cgroup_threadgroup_change_begin/end(), which operate on the per-signal_struct rwsem, are created, and threadgroup_lock/unlock() are moved to cgroup.c and made static.

This is pure reorganization and causes no functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
parent 8ab456ac36
commit 7d7efec368
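Before reading the diff, a minimal userspace analogue of the rwsem pattern involved may help (illustrative only, not kernel code; all names below are invented). The inversion to notice: operations that change the thread group take the rwsem for read, so they can run concurrently with one another, while code that needs the group to stay stable takes it for write and thereby excludes all changes at once.

        /*
         * Illustrative userspace analogue of the per-signal_struct rwsem.
         * NOT kernel code; names are hypothetical.
         */
        #include <pthread.h>

        static pthread_rwlock_t group_rwsem = PTHREAD_RWLOCK_INITIALIZER;

        /* fork/exit/exec side: group *modifiers* take the lock for READ */
        static void change_begin(void) { pthread_rwlock_rdlock(&group_rwsem); }
        static void change_end(void)   { pthread_rwlock_unlock(&group_rwsem); }

        /* stable-observer side: takes the lock for WRITE, freezing the group */
        static void group_lock(void)   { pthread_rwlock_wrlock(&group_rwsem); }
        static void group_unlock(void) { pthread_rwlock_unlock(&group_rwsem); }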
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/percpu-refcount.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 
 #ifdef CONFIG_CGROUPS
@@ -460,5 +461,14 @@ struct cgroup_subsys {
        unsigned int depends_on;
 };
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
+
+#else  /* CONFIG_CGROUPS */
+
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
+
 #endif /* CONFIG_CGROUPS */
+
 #endif /* _LINUX_CGROUP_DEFS_H */
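The #else branch above follows the usual kernel stub idiom: empty static inline definitions stand in when cgroups is disabled, so callers never need an #ifdef of their own. A hypothetical caller, sketched only from the declarations in this hunk, shows why:

        /* Hypothetical caller, for illustration only.  Because the
         * !CONFIG_CGROUPS branch supplies empty static inline stubs, this
         * compiles either way, and the calls vanish when cgroups is off. */
        static void example_modify_threadgroup(struct task_struct *tsk)
        {
                cgroup_threadgroup_change_begin(tsk);
                /* ... add or remove a thread here ... */
                cgroup_threadgroup_change_end(tsk);
        }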
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -58,6 +58,7 @@ struct sched_param {
 #include <linux/uidgid.h>
 #include <linux/gfp.h>
 #include <linux/magic.h>
+#include <linux/cgroup-defs.h>
 
 #include <asm/processor.h>
 
@@ -2648,54 +2649,34 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-#ifdef CONFIG_CGROUPS
+/**
+ * threadgroup_change_begin - mark the beginning of changes to a threadgroup
+ * @tsk: task causing the changes
+ *
+ * All operations which modify a threadgroup - a new thread joining the
+ * group, death of a member thread (the assertion of PF_EXITING) and
+ * exec(2) dethreading the process and replacing the leader - are wrapped
+ * by threadgroup_change_{begin|end}().  This is to provide a place which
+ * subsystems needing threadgroup stability can hook into for
+ * synchronization.
+ */
 static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-       down_read(&tsk->signal->group_rwsem);
+       might_sleep();
+       cgroup_threadgroup_change_begin(tsk);
 }
+
+/**
+ * threadgroup_change_end - mark the end of changes to a threadgroup
+ * @tsk: task causing the changes
+ *
+ * See threadgroup_change_begin().
+ */
 static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-       up_read(&tsk->signal->group_rwsem);
+       cgroup_threadgroup_change_end(tsk);
 }
-
-/**
- * threadgroup_lock - lock threadgroup
- * @tsk: member task of the threadgroup to lock
- *
- * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
- * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
- * change ->group_leader/pid.  This is useful for cases where the threadgroup
- * needs to stay stable across blockable operations.
- *
- * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
- * synchronization.  While held, no new task will be added to threadgroup
- * and no existing live task will have its PF_EXITING set.
- *
- * de_thread() does threadgroup_change_{begin|end}() when a non-leader
- * sub-thread becomes a new leader.
- */
-static inline void threadgroup_lock(struct task_struct *tsk)
-{
-       down_write(&tsk->signal->group_rwsem);
-}
-
-/**
- * threadgroup_unlock - unlock threadgroup
- * @tsk: member task of the threadgroup to unlock
- *
- * Reverse threadgroup_lock().
- */
-static inline void threadgroup_unlock(struct task_struct *tsk)
-{
-       up_write(&tsk->signal->group_rwsem);
-}
-#else
-static inline void threadgroup_change_begin(struct task_struct *tsk) {}
-static inline void threadgroup_change_end(struct task_struct *tsk) {}
-static inline void threadgroup_lock(struct task_struct *tsk) {}
-static inline void threadgroup_unlock(struct task_struct *tsk) {}
-#endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
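One detail worth noting in the hunk above: the old down_read() implicitly asserted that the caller may sleep; with the lock moved behind cgroup_threadgroup_change_begin(), the explicit might_sleep() preserves that debug check even if a future backend sometimes avoids blocking. A sketch of how the wrapper now acts as a pure hook point (the second subscriber is hypothetical, not in the patch):

        /* Sketch only: a second subsystem could be chained at the same spot.
         * other_subsys_threadgroup_begin() is hypothetical. */
        static inline void threadgroup_change_begin(struct task_struct *tsk)
        {
                might_sleep();                  /* keep the may-block assertion */
                cgroup_threadgroup_change_begin(tsk);
                /* other_subsys_threadgroup_begin(tsk); */
        }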
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
@@ -848,6 +848,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
        return cset;
 }
 
+void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+       down_read(&tsk->signal->group_rwsem);
+}
+
+void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+       up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid.  This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
+ *
+ * fork and exit explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static void threadgroup_lock(struct task_struct *tsk)
+{
+       down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+       up_write(&tsk->signal->group_rwsem);
+}
+
 static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
 {
        struct cgroup *root_cgrp = kf_root->kn->priv;
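With threadgroup_lock/unlock() now static in cgroup.c, the only write-side user is cgroup's own task-migration path. A simplified sketch of that usage follows (names abbreviated and the cgroup_attach_task() signature assumed from this era of cgroup.c; not the literal code):

        /* Simplified, assumed sketch of the write-side caller in cgroup.c. */
        static int example_migrate(struct cgroup *dst_cgrp,
                                   struct task_struct *leader)
        {
                int ret;

                threadgroup_lock(leader);       /* no thread may join, exit, or exec */
                ret = cgroup_attach_task(dst_cgrp, leader, true);
                threadgroup_unlock(leader);
                return ret;
        }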