mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-01 10:42:11 +00:00
cpu/hotplug: Remove unused state functions
All users converted to the hotplug core mechanism.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205256.972894276@linutronix.de
This commit is contained in:
parent
72b11aa7f8
commit
bc088f9a0d
@ -193,8 +193,6 @@ static inline void play_idle(unsigned long duration_us)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
bool cpu_wait_death(unsigned int cpu, int seconds);
|
||||
bool cpu_report_death(void);
|
||||
void cpuhp_report_idle_dead(void);
|
||||
#else
|
||||
static inline void cpuhp_report_idle_dead(void) { }
|
||||
|
@ -325,78 +325,3 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
|
||||
cpus_read_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
|
||||
|
||||
#ifndef CONFIG_HOTPLUG_CORE_SYNC
|
||||
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
/*
|
||||
* Wait for the specified CPU to exit the idle loop and die.
|
||||
*/
|
||||
bool cpu_wait_death(unsigned int cpu, int seconds)
|
||||
{
|
||||
int jf_left = seconds * HZ;
|
||||
int oldstate;
|
||||
bool ret = true;
|
||||
int sleep_jf = 1;
|
||||
|
||||
might_sleep();
|
||||
|
||||
/* The outgoing CPU will normally get done quite quickly. */
|
||||
if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
|
||||
goto update_state_early;
|
||||
udelay(5);
|
||||
|
||||
/* But if the outgoing CPU dawdles, wait increasingly long times. */
|
||||
while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
|
||||
schedule_timeout_uninterruptible(sleep_jf);
|
||||
jf_left -= sleep_jf;
|
||||
if (jf_left <= 0)
|
||||
break;
|
||||
sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
|
||||
}
|
||||
update_state_early:
|
||||
oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
|
||||
update_state:
|
||||
if (oldstate == CPU_DEAD) {
|
||||
/* Outgoing CPU died normally, update state. */
|
||||
smp_mb(); /* atomic_read() before update. */
|
||||
atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
|
||||
} else {
|
||||
/* Outgoing CPU still hasn't died, set state accordingly. */
|
||||
if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
|
||||
&oldstate, CPU_BROKEN))
|
||||
goto update_state;
|
||||
ret = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called by the outgoing CPU to report its successful death. Return
|
||||
* false if this report follows the surviving CPU's timing out.
|
||||
*
|
||||
* A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
|
||||
* timed out. This approach allows architectures to omit calls to
|
||||
* cpu_check_up_prepare() and cpu_set_state_online() without defeating
|
||||
* the next cpu_wait_death()'s polling loop.
|
||||
*/
|
||||
bool cpu_report_death(void)
|
||||
{
|
||||
int oldstate;
|
||||
int newstate;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
|
||||
do {
|
||||
if (oldstate != CPU_BROKEN)
|
||||
newstate = CPU_DEAD;
|
||||
else
|
||||
newstate = CPU_DEAD_FROZEN;
|
||||
} while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
|
||||
&oldstate, newstate));
|
||||
return newstate == CPU_DEAD;
|
||||
}
|
||||
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
|
||||
|
Loading…
Reference in New Issue
Block a user