Merge tag 'timers_urgent_for_v6.7_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fix from Borislav Petkov:

 - Do the push of pending hrtimers away from a CPU which is being
   offlined earlier in the offlining process in order to prevent a
   deadlock

* tag 'timers_urgent_for_v6.7_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hrtimers: Push pending hrtimers away from outgoing CPU earlier
commit b0014556a2
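The diff below wires the hrtimer migration into a new CPU-hotplug state in the dying section, so the outgoing CPU pushes its pending timers to the first active CPU itself instead of leaving the cleanup to the control CPU after it is already dead. As an orientation aid only, and not part of the commit, here is a minimal sketch of how the same cpuhp callback machinery is normally used from modular code with a dynamically allocated state; the identifiers my_cpu_online, my_cpu_prepare_down and my_hp_state are illustrative assumptions.

/*
 * Hedged sketch (not from the commit): a hypothetical module registering
 * per-CPU hotplug callbacks through the cpuhp API that this patch extends.
 */
#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/printk.h>

static enum cpuhp_state my_hp_state;	/* illustrative name */

static int my_cpu_online(unsigned int cpu)
{
	pr_info("cpu %u came online\n", cpu);
	return 0;
}

static int my_cpu_prepare_down(unsigned int cpu)
{
	/*
	 * The teardown of an AP state runs on the outgoing CPU while it is
	 * still running; this is where per-CPU work would be handed off,
	 * loosely analogous to what hrtimers_cpu_dying() does below.
	 */
	pr_info("cpu %u is going down\n", cpu);
	return 0;
}

static int __init my_hp_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN asks the core to allocate a state slot. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/myhp:online",
				my_cpu_online, my_cpu_prepare_down);
	if (ret < 0)
		return ret;
	my_hp_state = ret;
	return 0;
}

static void __exit my_hp_exit(void)
{
	cpuhp_remove_state(my_hp_state);
}

module_init(my_hp_init);
module_exit(my_hp_exit);
MODULE_LICENSE("GPL");

The commit itself adds a static CPUHP_AP_HRTIMERS_DYING state instead, placed between CPUHP_AP_SMPCFD_DYING and CPUHP_AP_X86_TBOOT_DYING, because the migration has to run at a fixed point of the dying sequence; the hunks below show the new state, the renamed callback, and the rewired state table.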
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -195,6 +195,7 @@ enum cpuhp_state {
 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,
 	CPUHP_AP_SMPCFD_DYING,
+	CPUHP_AP_HRTIMERS_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
 	CPUHP_AP_ONLINE,
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -531,9 +531,9 @@ extern void sysrq_timer_list_show(void);
 
 int hrtimers_prepare_cpu(unsigned int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
 #else
-#define hrtimers_dead_cpu	NULL
+#define hrtimers_cpu_dying	NULL
 #endif
 
 #endif
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2113,7 +2113,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 	[CPUHP_HRTIMERS_PREPARE] = {
 		.name			= "hrtimers:prepare",
 		.startup.single		= hrtimers_prepare_cpu,
-		.teardown.single	= hrtimers_dead_cpu,
+		.teardown.single	= NULL,
 	},
 	[CPUHP_SMPCFD_PREPARE] = {
 		.name			= "smpcfd:prepare",
@@ -2205,6 +2205,12 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single		= NULL,
 		.teardown.single	= smpcfd_dying_cpu,
 	},
+	[CPUHP_AP_HRTIMERS_DYING] = {
+		.name			= "hrtimers:dying",
+		.startup.single		= NULL,
+		.teardown.single	= hrtimers_cpu_dying,
+	},
+
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronsization */
 	[CPUHP_AP_ONLINE] = {
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2219,29 +2219,22 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 	}
 }
 
-int hrtimers_dead_cpu(unsigned int scpu)
+int hrtimers_cpu_dying(unsigned int dying_cpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i;
+	int i, ncpu = cpumask_first(cpu_active_mask);
 
-	BUG_ON(cpu_online(scpu));
-	tick_cancel_sched_timer(scpu);
+	tick_cancel_sched_timer(dying_cpu);
 
-	/*
-	 * this BH disable ensures that raise_softirq_irqoff() does
-	 * not wakeup ksoftirqd (and acquire the pi-lock) while
-	 * holding the cpu_base lock
-	 */
-	local_bh_disable();
-	local_irq_disable();
-	old_base = &per_cpu(hrtimer_bases, scpu);
-	new_base = this_cpu_ptr(&hrtimer_bases);
+	old_base = this_cpu_ptr(&hrtimer_bases);
+	new_base = &per_cpu(hrtimer_bases, ncpu);
+
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	raw_spin_lock(&new_base->lock);
-	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&old_base->lock);
+	raw_spin_lock_nested(&new_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
@@ -2252,15 +2245,13 @@ int hrtimers_dead_cpu(unsigned int scpu)
 	 * The migration might have changed the first expiring softirq
 	 * timer on this CPU. Update it.
 	 */
-	hrtimer_update_softirq_timer(new_base, false);
+	__hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+	/* Tell the other CPU to retrigger the next event */
+	smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
 
-	raw_spin_unlock(&old_base->lock);
 	raw_spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
 
-	/* Check, if we got expired work to do */
-	__hrtimer_peek_ahead_timers();
-	local_irq_enable();
-	local_bh_enable();
 	return 0;
 }
 