mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 09:16:33 +00:00
lib/dump_stack: move cpu lock to printk.c
dump_stack() implements its own cpu-reentrant spinning lock to best-effort serialize stack traces in the printk log. However, there are other functions (such as show_regs()) that can also benefit from this serialization. Move the cpu-reentrant spinning lock (cpu lock) into new helper functions printk_cpu_lock_irqsave()/printk_cpu_unlock_irqrestore() so that it is available for others as well. For !CONFIG_SMP the cpu lock is a NOP. Note that having multiple cpu locks in the system can easily lead to deadlock. Code needing a cpu lock should use the printk cpu lock, since the printk cpu lock could be acquired from any code and any context. Also note that it is not necessary for a cpu lock to disable interrupts. However, in upcoming work this cpu lock will be used for emergency tasks (for example, atomic consoles during kernel crashes) and any interruptions while holding the cpu lock should be avoided if possible. Signed-off-by: John Ogness <john.ogness@linutronix.de> Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org> Reviewed-by: Petr Mladek <pmladek@suse.com> [pmladek@suse.com: Backported on top of 5.13-rc1.] Signed-off-by: Petr Mladek <pmladek@suse.com> Link: https://lore.kernel.org/r/20210617095051.4808-2-john.ogness@linutronix.de
This commit is contained in:
parent
acebb5597f
commit
766c268bc6
@@ -282,6 +282,47 @@ static inline void printk_safe_flush_on_panic(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP

extern int __printk_cpu_trylock(void);
extern void __printk_wait_on_cpu_lock(void);
extern void __printk_cpu_unlock(void);

/**
 * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
 *                             lock and disable interrupts.
 * @flags: Stack-allocated storage for saving local interrupt state,
 *         to be passed to printk_cpu_unlock_irqrestore().
 *
 * If the lock is owned by another CPU, spin until it becomes available.
 * Interrupts are restored while spinning.
 *
 * Note: interrupts must be disabled while the lock is held so that the
 * owner cannot be preempted mid-output; they are briefly re-enabled
 * between trylock attempts to keep spin time interrupt-responsive.
 */
#define printk_cpu_lock_irqsave(flags)		\
	for (;;) {				\
		local_irq_save(flags);		\
		if (__printk_cpu_trylock())	\
			break;			\
		local_irq_restore(flags);	\
		__printk_wait_on_cpu_lock();	\
	}

/**
 * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
 *                                  lock and restore interrupts.
 * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
 */
#define printk_cpu_unlock_irqrestore(flags)	\
	do {					\
		__printk_cpu_unlock();		\
		local_irq_restore(flags);	\
	} while (0)

#else

/*
 * For !CONFIG_SMP the cpu lock is a NOP. @flags is still consumed so
 * callers do not trigger set-but-unused warnings.
 */
#define printk_cpu_lock_irqsave(flags) ((void)flags)
#define printk_cpu_unlock_irqrestore(flags) ((void)flags)

#endif /* CONFIG_SMP */
|
||||
|
||||
extern int kptr_restrict;
|
||||
|
||||
/**
|
||||
|
@@ -3531,3 +3531,72 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
|
||||
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
/*
 * The printk cpu-reentrant spinning lock: best-effort serialization of
 * multi-line output (stack dumps, register dumps) between CPUs.
 *
 * @printk_cpulock_owner holds the owning CPU number, or -1 when unowned.
 * @printk_cpulock_nested counts reentrant acquisitions by the owning CPU.
 */
static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);

/**
 * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
 *                               spinning lock is not owned by any CPU.
 *
 * Context: Any context.
 */
void __printk_wait_on_cpu_lock(void)
{
	do {
		cpu_relax();
	} while (atomic_read(&printk_cpulock_owner) != -1);
}
EXPORT_SYMBOL(__printk_wait_on_cpu_lock);

/**
 * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
 *                          spinning lock.
 *
 * If no processor has the lock, the calling processor takes the lock and
 * becomes the owner. If the calling processor is already the owner of the
 * lock, this function succeeds immediately.
 *
 * Context: Any context. Expects interrupts to be disabled.
 * Return: 1 on success, otherwise 0.
 */
int __printk_cpu_trylock(void)
{
	int cpu;
	int old;

	cpu = smp_processor_id();

	/*
	 * Acquire semantics are required so that loads/stores performed
	 * while holding the lock cannot be reordered before the lock is
	 * observed as taken. Pairs with the release in __printk_cpu_unlock().
	 * (A fully-relaxed cmpxchg would make the "lock" order nothing.)
	 */
	old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1, cpu);
	if (old == -1) {
		/* This CPU is now the owner. */
		return 1;
	} else if (old == cpu) {
		/* This CPU is already the owner: count the nesting level. */
		atomic_inc(&printk_cpulock_nested);
		return 1;
	}

	/* Another CPU owns the lock. */
	return 0;
}
EXPORT_SYMBOL(__printk_cpu_trylock);

/**
 * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
 *
 * The calling processor must be the owner of the lock.
 *
 * Context: Any context. Expects interrupts to be disabled.
 */
void __printk_cpu_unlock(void)
{
	if (atomic_read(&printk_cpulock_nested)) {
		/* Unwind one level of nested ownership; lock stays held. */
		atomic_dec(&printk_cpulock_nested);
		return;
	}

	/*
	 * Release semantics are required so that loads/stores performed
	 * while holding the lock are visible to the next owner. Pairs with
	 * the acquire in __printk_cpu_trylock().
	 */
	atomic_set_release(&printk_cpulock_owner, -1);
}
EXPORT_SYMBOL(__printk_cpu_unlock);
#endif /* CONFIG_SMP */
|
||||
|
@@ -84,50 +84,16 @@ static void __dump_stack(void)
|
||||
*
|
||||
* Architectures can override this implementation by implementing its own.
|
||||
*/
|
||||
#ifdef CONFIG_SMP
|
||||
static atomic_t dump_lock = ATOMIC_INIT(-1);
|
||||
|
||||
asmlinkage __visible void dump_stack(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
int was_locked;
|
||||
int old;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Permit this cpu to perform nested stack dumps while serialising
|
||||
* against other CPUs
|
||||
*/
|
||||
retry:
|
||||
local_irq_save(flags);
|
||||
cpu = smp_processor_id();
|
||||
old = atomic_cmpxchg(&dump_lock, -1, cpu);
|
||||
if (old == -1) {
|
||||
was_locked = 0;
|
||||
} else if (old == cpu) {
|
||||
was_locked = 1;
|
||||
} else {
|
||||
local_irq_restore(flags);
|
||||
/*
|
||||
* Wait for the lock to release before jumping to
|
||||
* atomic_cmpxchg() in order to mitigate the thundering herd
|
||||
* problem.
|
||||
*/
|
||||
do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
printk_cpu_lock_irqsave(flags);
|
||||
__dump_stack();
|
||||
|
||||
if (!was_locked)
|
||||
atomic_set(&dump_lock, -1);
|
||||
|
||||
local_irq_restore(flags);
|
||||
printk_cpu_unlock_irqrestore(flags);
|
||||
}
|
||||
#else
|
||||
asmlinkage __visible void dump_stack(void)
|
||||
{
|
||||
__dump_stack();
|
||||
}
|
||||
#endif
|
||||
EXPORT_SYMBOL(dump_stack);
|
||||
|
Loading…
Reference in New Issue
Block a user