Merge tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Borislav Petkov:

 - Remove the wrong enqueueing of a task for a later wakeup when the task
   blocks on an RT mutex

 - Do not set up a new deadline entity on a boosted task as that has
   already happened

 - Update the preempt= kernel command line parameter documentation for the
   new lazy preemption mode

 - Prevent needless ksoftirqd wakeups in the idle task's context

 - Detect the case where the idle load balancer CPU becomes busy and
   avoid an unnecessary load balancing invocation

 - Remove an unnecessary need_resched() check in the load balancing
   function nohz_csd_func()

 - Allow raising the SCHED_SOFTIRQ softirq type on RT but retain the
   warning to catch any other cases

 - Remove a wrong warning issued when a cpuset update makes the task's
   affinity no longer a subset of the cpuset

* tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking: rtmutex: Fix wake_q logic in task_blocks_on_rt_mutex
  sched/deadline: Fix warning in migrate_enable for boosted tasks
  sched/core: Update kernel boot parameters for LAZY preempt.
  sched/core: Prevent wakeup of ksoftirqd during idle load balance
  sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy
  sched/core: Remove the unnecessary need_resched() check in nohz_csd_func()
  softirq: Allow raising SCHED_SOFTIRQ from SMP-call-function on RT kernel
  sched: fix warning in sched_setaffinity
  sched/deadline: Fix replenish_dl_new_period dl_server condition
commit df9e2102de (Linus Torvalds, 2024-12-09 10:28:55 -08:00)
7 changed files with 22 additions and 12 deletions

Documentation/admin-guide/kernel-parameters.txt

@@ -4822,6 +4822,11 @@
 			can be preempted anytime. Tasks will also yield
 			contended spinlocks (if the critical section isn't
 			explicitly preempt disabled beyond the lock itself).
+			lazy - Scheduler controlled. Similar to full but instead
+			       of preempting the task immediately, the task gets
+			       one HZ tick time to yield itself before the
+			       preemption will be forced. One preemption is when the
+			       task returns to user space.
 
 	print-fatal-signals=
 			[KNL] debug: print fatal signals

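As a usage note (a sketch, assuming a kernel built with PREEMPT_DYNAMIC, which is what makes preempt= selectable): the new model is picked at boot like the existing none/voluntary/full ones, e.g.

    preempt=lazy

and on such kernels the currently active model can also be inspected, and where supported switched, at runtime via /sys/kernel/debug/sched/preempt.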
kernel/locking/rtmutex.c

@@ -1248,10 +1248,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 
 	/* Check whether the waiter should back out immediately */
 	rtm = container_of(lock, struct rt_mutex, rtmutex);
-	preempt_disable();
 	res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
-	wake_up_q(wake_q);
-	preempt_enable();
 	if (res) {
 		raw_spin_lock(&task->pi_lock);
 		rt_mutex_dequeue(lock, waiter);

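For background, the wake_q machinery this fix corrects batches wakeups so they can be issued after the lock protecting the decision is dropped; the bug was that the about-to-block task itself could end up on the queue, producing the spurious later wakeup mentioned in the summary. A minimal sketch of the intended pattern, using the real wake_q primitives but otherwise hypothetical names (some_lock, should_wake, other_task):

    /* Deferred wakeups via wake_q (kernel-internal API). */
    DEFINE_WAKE_Q(wake_q);                   /* empty queue on the stack */

    raw_spin_lock(&some_lock);
    if (should_wake(other_task))             /* decide under the lock... */
        wake_q_add(&wake_q, other_task);     /* ...but only queue the wakeup */
    raw_spin_unlock(&some_lock);

    wake_up_q(&wake_q);                      /* wake everything queued above */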
kernel/sched/core.c

@@ -1283,9 +1283,9 @@ static void nohz_csd_func(void *info)
 	WARN_ON(!(flags & NOHZ_KICK_MASK));
 
 	rq->idle_balance = idle_cpu(cpu);
-	if (rq->idle_balance && !need_resched()) {
+	if (rq->idle_balance) {
 		rq->nohz_idle_balance = flags;
-		raise_softirq_irqoff(SCHED_SOFTIRQ);
+		__raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
 }

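The ksoftirqd part of the fix is carried by the switch to the underscored variant: __raise_softirq_irqoff() only marks the softirq pending, while raise_softirq_irqoff() may additionally wake ksoftirqd when called outside hard/soft interrupt context. That wakeup is needless here because the SMP-call flush path processes the newly pending softirq itself (see the softirq.c hunk below). Roughly, the plain variant looks like this in kernel/softirq.c:

    void raise_softirq_irqoff(unsigned int nr)
    {
        __raise_softirq_irqoff(nr);     /* just set the pending bit */
        if (!in_interrupt())            /* not in (soft)irq context: */
            wakeup_softirqd();          /* ensure the softirq is handled */
    }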
kernel/sched/deadline.c

@@ -781,7 +781,7 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
 	 * If it is a deferred reservation, and the server
 	 * is not handling an starvation case, defer it.
 	 */
-	if (dl_se->dl_defer & !dl_se->dl_defer_running) {
+	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
 		dl_se->dl_throttled = 1;
 		dl_se->dl_defer_armed = 1;
 	}
@@ -2042,6 +2042,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se);
 	} else if ((flags & ENQUEUE_RESTORE) &&
+		   !is_dl_boosted(dl_se) &&
 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
 		setup_new_dl_entity(dl_se);
 	}

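The first hunk swaps a bitwise & for the intended logical &&. Since dl_defer and dl_defer_running appear to be single-bit flags, the two forms happen to agree here, but the bitwise form breaks silently for any wider operand, as this standalone demonstration shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int defer = 2;     /* logically true, but not the value 1 */
        unsigned int running = 0;

        /* Bitwise: 2 & !0 == 2 & 1 == 0 -> wrongly false */
        printf("bitwise: %u\n", defer & !running);
        /* Logical: 2 && !0 -> correctly true */
        printf("logical: %d\n", defer && !running);
        return 0;
    }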
kernel/sched/fair.c

@@ -12574,7 +12574,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
 		 * work being done for other CPUs. Next load
 		 * balancing owner will pick it up.
 		 */
-		if (need_resched()) {
+		if (!idle_cpu(this_cpu) && need_resched()) {
 			if (flags & NOHZ_STATS_KICK)
 				has_blocked_load = true;
 			if (flags & NOHZ_NEXT_KICK)

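The added idle_cpu() test matters because need_resched() alone is ambiguous on the idle load balancer CPU: to avoid a real IPI, the nohz kick can wake a polling idle CPU simply by setting its TIF_NEED_RESCHED flag, so the flag may be set while the CPU is still idle. A comment-only sketch of that interaction, paraphrasing the fix's reasoning:

    /*
     * kick_ilb() may skip the IPI and just set TIF_NEED_RESCHED to pull
     * a polling idle CPU out of its idle loop, so:
     *
     *   idle_cpu(this_cpu)  && need_resched()  => merely kicked awake to
     *                                             do the balance: carry on
     *   !idle_cpu(this_cpu) && need_resched()  => genuinely turned busy:
     *                                             abort and let the next
     *                                             ilb owner pick it up
     */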
kernel/sched/syscalls.c

@@ -1200,7 +1200,7 @@ int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		bool empty = !cpumask_and(new_mask, new_mask,
 					  ctx->user_mask);
 
-		if (WARN_ON_ONCE(empty))
+		if (empty)
 			cpumask_copy(new_mask, cpus_allowed);
 	}
 	__set_cpus_allowed_ptr(p, ctx);

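The warning was dropped because an empty intersection is a legitimate, user-triggerable situation (a concurrent cpuset update shrinking the allowed set), not a kernel bug, and the code already recovers by falling back to cpus_allowed. A userspace analogy of that fallback with plain bitmasks (all names and values illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int cpus_allowed = 0x0f;   /* cpuset now permits CPUs 0-3 */
        unsigned int user_mask    = 0xf0;   /* task requested CPUs 4-7 */
        unsigned int new_mask     = cpus_allowed & user_mask;

        if (new_mask == 0)                  /* empty: benign race, no warning */
            new_mask = cpus_allowed;        /* fall back to the cpuset mask */

        printf("effective mask: 0x%x\n", new_mask);  /* prints 0xf */
        return 0;
    }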
kernel/softirq.c

@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
 		wakeup_softirqd();
 }
 
+#define SCHED_SOFTIRQ_MASK	BIT(SCHED_SOFTIRQ)
+
 /*
  * flush_smp_call_function_queue() can raise a soft interrupt in a function
- * call. On RT kernels this is undesired and the only known functionality
- * in the block layer which does this is disabled on RT. If soft interrupts
- * get raised which haven't been raised before the flush, warn so it can be
+ * call. On RT kernels this is undesired and the only known functionalities
+ * are in the block layer which is disabled on RT, and in the scheduler for
+ * idle load balancing. If soft interrupts get raised which haven't been
+ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
  * investigated.
  */
 void do_softirq_post_smp_call_flush(unsigned int was_pending)
 {
-	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+	unsigned int is_pending = local_softirq_pending();
+
+	if (unlikely(was_pending != is_pending)) {
+		WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
 		invoke_softirq();
+	}
 }
 
 #else /* CONFIG_PREEMPT_RT */
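The net effect of this change is that a SCHED_SOFTIRQ newly raised during the flush is handled without complaint, while any other softirq that appears out of nowhere still triggers the warning. A standalone re-creation of the check (the softirq numbering mirrors the kernel's enum, where SCHED_SOFTIRQ is 7; the pending values are illustrative):

    #include <stdio.h>

    #define SCHED_SOFTIRQ       7u
    #define SCHED_SOFTIRQ_MASK  (1u << SCHED_SOFTIRQ)

    /* Mirrors: WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK)) */
    static int would_warn(unsigned int was_pending, unsigned int is_pending)
    {
        return was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK);
    }

    int main(void)
    {
        /* Only SCHED_SOFTIRQ was raised during the flush: tolerated. */
        printf("%d\n", would_warn(0x0, SCHED_SOFTIRQ_MASK));   /* 0 */
        /* Some other softirq (bit 1, TIMER) was raised: warns. */
        printf("%d\n", would_warn(0x0, 1u << 1));              /* 1 */
        return 0;
    }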