sched, autogroup: Fix CONFIG_RT_GROUP_SCHED sched_setscheduler() failure
If CONFIG_RT_GROUP_SCHED is set, __sched_setscheduler() fails due to autogroup
not allocating rt_runtime.  Free unused/unusable rt_se and rt_rq, redirect RT
tasks to the root task group, and tell __sched_setscheduler() that it's ok.

Reported-and-tested-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1294890890.8089.39.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit f44937718c
parent 8ecedd7a06
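For context, here is a minimal userspace sketch of the reported failure, not part of the patch: it assumes a kernel built with CONFIG_RT_GROUP_SCHED and CONFIG_SCHED_AUTOGROUP, and a caller that is otherwise allowed to set RT policies (root, CAP_SYS_NICE, or a suitable RLIMIT_RTPRIO). Before this fix, the sched_setscheduler() call below returned EPERM for an autogrouped task because its task_group carried no rt_runtime; with the fix, the bandwidth check is skipped for autogroups and the call can succeed.

/* reproducer sketch (userspace), illustrative only */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };

	/*
	 * On affected kernels this failed with EPERM even for a
	 * sufficiently privileged caller, because the autogroup's
	 * task_group had rt_bandwidth.rt_runtime == 0.
	 */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}

	printf("now running SCHED_FIFO\n");
	return 0;
}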
@@ -4871,7 +4871,8 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
 		 * assigned.
 		 */
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+				!task_group_is_autogroup(task_group(p))) {
 			__task_rq_unlock(rq);
 			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 			return -EPERM;
@@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref)
 {
 	struct autogroup *ag = container_of(kref, struct autogroup, kref);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
 	sched_destroy_group(ag->tg);
 }
 
@@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
 	return ag;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void)
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed.  Thereafter, task_group()
+	 * returns &root_task_group, so zero bandwidth is required.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
 	tg->autogroup = ag;
 
 	return ag;
@@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
@@ -15,6 +15,10 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg);
 
 static inline void autogroup_init(struct task_struct *init_task) { }
 static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return 0;
+}
 
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)