sched/deadline: Rename __dl_clear() to __dl_sub()
__dl_sub() is more meaningful as a name, and is more consistent with
the naming of the dual function (__dl_add()).

Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1504778971-13573-4-git-send-email-luca.abeni@santannapisa.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 295d6d5e37
commit 8c0944cee7
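For illustration, here is a minimal, self-contained user-space sketch of the dual pair after this rename. struct dl_bw, __dl_update() and main() below are simplified stand-ins rather than the kernel definitions, and the __dl_add() body merely mirrors the shape of the __dl_sub() helper shown in the hunks that follow:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the kernel's struct dl_bw: only the total
 * reserved deadline bandwidth is tracked here. */
struct dl_bw {
	uint64_t total_bw;
};

/* Stand-in for __dl_update(), which in the kernel propagates the
 * per-CPU share of a bandwidth change; here it only reports it. */
static void __dl_update(struct dl_bw *dl_b, int64_t bw)
{
	(void)dl_b;
	printf("per-CPU bandwidth delta: %lld\n", (long long)bw);
}

/* The renamed helper: release a task's bandwidth from the pool. */
static void __dl_sub(struct dl_bw *dl_b, uint64_t tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (int32_t)tsk_bw / cpus);
}

/* Its dual: account a task's bandwidth into the pool. */
static void __dl_add(struct dl_bw *dl_b, uint64_t tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((int32_t)tsk_bw / cpus));
}

int main(void)
{
	struct dl_bw dl_b = { .total_bw = 0 };

	__dl_add(&dl_b, 1 << 20, 4);	/* admit a task's reservation */
	__dl_sub(&dl_b, 1 << 20, 4);	/* release it again */
	printf("total_bw = %llu\n", (unsigned long long)dl_b.total_bw);
	return 0;
}

The point of the rename is the symmetry of that pair: __dl_add() and __dl_sub() move bandwidth in opposite directions, while __dl_clear_params(), which really does clear state, keeps its name.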
kernel/sched/deadline.c

@@ -242,7 +242,7 @@ static void task_non_contending(struct task_struct *p)
 			if (p->state == TASK_DEAD)
 				sub_rq_bw(p->dl.dl_bw, &rq->dl);
 			raw_spin_lock(&dl_b->lock);
-			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 			__dl_clear_params(p);
 			raw_spin_unlock(&dl_b->lock);
 		}

@@ -1209,7 +1209,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 		}

 		raw_spin_lock(&dl_b->lock);
-		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		raw_spin_unlock(&dl_b->lock);
 		__dl_clear_params(p);

@@ -2170,7 +2170,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		 * until we complete the update.
 		 */
 		raw_spin_lock(&src_dl_b->lock);
-		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		raw_spin_unlock(&src_dl_b->lock);
 	}

@@ -2448,7 +2448,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
 	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
 		if (hrtimer_active(&p->dl.inactive_timer))
-			__dl_clear(dl_b, p->dl.dl_bw, cpus);
+			__dl_sub(dl_b, p->dl.dl_bw, cpus);
 		__dl_add(dl_b, new_bw, cpus);
 		err = 0;
 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&

@@ -2460,7 +2460,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 		 * But this would require to set the task's "inactive
 		 * timer" when the task is not inactive.
 		 */
-		__dl_clear(dl_b, p->dl.dl_bw, cpus);
+		__dl_sub(dl_b, p->dl.dl_bw, cpus);
 		__dl_add(dl_b, new_bw, cpus);
 		dl_change_utilization(p, new_bw);
 		err = 0;

kernel/sched/sched.h

@@ -226,7 +226,7 @@ struct dl_bw {
 static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

 static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 {
 	dl_b->total_bw -= tsk_bw;
 	__dl_update(dl_b, (s32)tsk_bw / cpus);
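As a usage note, the sched_dl_overflow() hunks above pair __dl_sub() with __dl_add() to swap a task's old reservation for its new one. A hedged, self-contained sketch of that pattern follows; change_reservation() and would_overflow() are hypothetical names, the admission test only approximates the kernel's __dl_overflow() check, and the per-CPU __dl_update() propagation is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of a root-domain bandwidth pool. */
struct dl_bw {
	int64_t  bw;        /* per-CPU limit, -1 means "no limit" */
	uint64_t total_bw;  /* sum of admitted task bandwidths */
};

static void __dl_sub(struct dl_bw *b, uint64_t tsk_bw, int cpus)
{
	(void)cpus;		/* per-CPU propagation omitted in this model */
	b->total_bw -= tsk_bw;
}

static void __dl_add(struct dl_bw *b, uint64_t tsk_bw, int cpus)
{
	(void)cpus;
	b->total_bw += tsk_bw;
}

/* Hypothetical admission check in the spirit of __dl_overflow():
 * would the pool exceed cpus * bw after swapping old_bw for new_bw? */
static bool would_overflow(struct dl_bw *b, int cpus, uint64_t old_bw, uint64_t new_bw)
{
	return b->bw != -1 &&
	       (uint64_t)b->bw * cpus < b->total_bw - old_bw + new_bw;
}

/* The pattern from the sched_dl_overflow() hunks: release the old
 * reservation, then account the new one. */
static int change_reservation(struct dl_bw *b, int cpus, uint64_t old_bw, uint64_t new_bw)
{
	if (would_overflow(b, cpus, old_bw, new_bw))
		return -1;	/* admission refused, pool left unchanged */
	__dl_sub(b, old_bw, cpus);
	__dl_add(b, new_bw, cpus);
	return 0;
}

int main(void)
{
	struct dl_bw b = { .bw = 100, .total_bw = 0 };	/* capacity: 4 CPUs * 100 */

	__dl_add(&b, 150, 4);					/* first reservation */
	printf("grow to 300: %d\n", change_reservation(&b, 4, 150, 300));
	printf("grow to 500: %d\n", change_reservation(&b, 4, 300, 500));
	printf("total_bw = %llu\n", (unsigned long long)b.total_bw);
	return 0;
}

In the kernel these updates run with dl_b->lock held (as the other hunks show for __dl_sub()), so keeping the subtraction and addition adjacent means the pool is never observed with the task either double-counted or missing.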