mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
0784181b44
Add a function to check that an offline CPU has left the tracing
infrastructure in a sane state.
Commit 9bb69ba4c1
("ACPI: processor_idle: use raw_safe_halt() in
acpi_idle_play_dead()") fixed an issue where the acpi_idle_play_dead()
function called safe_halt() instead of raw_safe_halt(), which had the
side-effect of setting the hardirqs_enabled flag for the offline CPU.
On x86 this triggered warnings from lockdep_assert_irqs_disabled() when
the CPU was brought back online again later. These warnings were too
early for the exception to be handled correctly, leading to a
triple-fault.
Add lockdep_cleanup_dead_cpu() to check for this kind of failure mode,
print the events leading up to it, and correct it so that the CPU can
come online again correctly. Re-introducing the original bug now merely
results in this warning instead:
[ 61.556652] smpboot: CPU 1 is now offline
[ 61.556769] CPU 1 left hardirqs enabled!
[ 61.556915] irq event stamp: 128149
[ 61.556965] hardirqs last enabled at (128149): [<ffffffff81720a36>] acpi_idle_play_dead+0x46/0x70
[ 61.557055] hardirqs last disabled at (128148): [<ffffffff81124d50>] do_idle+0x90/0xe0
[ 61.557117] softirqs last enabled at (128078): [<ffffffff81cec74c>] __do_softirq+0x31c/0x423
[ 61.557199] softirqs last disabled at (128065): [<ffffffff810baae1>] __irq_exit_rcu+0x91/0x100
[boqun: Capitalize the title and reword the message a bit]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/f7bd2b3b999051bb3ef4be34526a9262008285f5.camel@infradead.org
272 lines
8.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/irqflags.h
 *
 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
 * provide callbacks for transitions between ON and OFF states.
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() macros from the lowlevel headers.
 */
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

/* Only pointers to task_struct are used below; no full definition needed. */
struct task_struct;
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
extern void lockdep_softirqs_off(unsigned long ip);
extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
/*
 * Check that a CPU going offline left the irq tracing state in a sane
 * shape (e.g. did not die with hardirqs marked enabled), report the irq
 * events leading up to any inconsistency, and repair the state so the
 * CPU can be brought online again.
 */
extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
				     struct task_struct *idle);
#else
/* !CONFIG_PROVE_LOCKING: all hooks compile away to nothing. */
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
					    struct task_struct *idle) {}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS

/* Per-CPU irq-tracing state, updated by the trace_hardirqs_*() hooks. */
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

/* Accessors for the tracked hardirq/softirq state. */
# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
# define lockdep_softirq_context(p)	((p)->softirq_context)
# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
/*
 * Entering the outermost hardirq (nesting count goes 0 -> 1) resets the
 * "threaded" marker; lockdep_hardirq_threaded() sets it explicitly for
 * handlers that run in threaded context.
 */
# define lockdep_hardirq_enter()			\
do {							\
	if (__this_cpu_inc_return(hardirq_context) == 1)\
		current->hardirq_threaded = 0;		\
} while (0)
# define lockdep_hardirq_threaded()		\
do {						\
	current->hardirq_threaded = 1;		\
} while (0)
# define lockdep_hardirq_exit()			\
do {						\
	__this_cpu_dec(hardirq_context);	\
} while (0)

/*
 * Evaluates to true when the hrtimer expires in hard irq context; for
 * soft-expiring timers (!is_hard) the irq_config marker is set for the
 * duration of the callback and cleared again in lockdep_hrtimer_exit().
 */
# define lockdep_hrtimer_enter(__hrtimer)		\
({							\
	bool __expires_hardirq = true;			\
							\
	if (!__hrtimer->is_hard) {			\
		current->irq_config = 1;		\
		__expires_hardirq = false;		\
	}						\
	__expires_hardirq;				\
})

# define lockdep_hrtimer_exit(__expires_hardirq)	\
do {							\
	if (!__expires_hardirq)				\
		current->irq_config = 0;		\
} while (0)

/* Mark the task while a posix timer callback is being processed. */
# define lockdep_posixtimer_enter()				\
	  do {							\
		  current->irq_config = 1;			\
	  } while (0)

# define lockdep_posixtimer_exit()				\
	  do {							\
		  current->irq_config = 0;			\
	  } while (0)

/* Only non-hard irq_work (no IRQ_WORK_HARD_IRQ flag) sets irq_config. */
# define lockdep_irq_work_enter(_flags)					\
	  do {								\
		  if (!((_flags) & IRQ_WORK_HARD_IRQ))			\
			current->irq_config = 1;			\
	  } while (0)
# define lockdep_irq_work_exit(_flags)					\
	  do {								\
		  if (!((_flags) & IRQ_WORK_HARD_IRQ))			\
			current->irq_config = 0;			\
	  } while (0)

#else
/* !CONFIG_TRACE_IRQFLAGS: no tracking, everything is a no-op / constant. */
# define trace_hardirqs_on_prepare()		do { } while (0)
# define trace_hardirqs_off_finish()		do { } while (0)
# define trace_hardirqs_on()			do { } while (0)
# define trace_hardirqs_off()			do { } while (0)
# define lockdep_hardirq_context()		0
# define lockdep_softirq_context(p)		0
# define lockdep_hardirqs_enabled()		0
# define lockdep_softirqs_enabled(p)		0
# define lockdep_hardirq_enter()		do { } while (0)
# define lockdep_hardirq_threaded()		do { } while (0)
# define lockdep_hardirq_exit()			do { } while (0)
# define lockdep_softirq_enter()		do { } while (0)
# define lockdep_softirq_exit()			do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer)	false
# define lockdep_hrtimer_exit(__context)	do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter()		do { } while (0)
# define lockdep_posixtimer_exit()		do { } while (0)
# define lockdep_irq_work_enter(__work)		do { } while (0)
# define lockdep_irq_work_exit(__work)		do { } while (0)
#endif
/*
 * softirq_context nesting is only tracked with CONFIG_TRACE_IRQFLAGS and
 * when not on PREEMPT_RT; otherwise these are no-ops.
 */
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
# define lockdep_softirq_enter()		\
do {						\
	current->softirq_context++;		\
} while (0)
# define lockdep_softirq_exit()			\
do {						\
	current->softirq_context--;		\
} while (0)

#else
# define lockdep_softirq_enter()		do { } while (0)
# define lockdep_softirq_exit()			do { } while (0)
#endif
/*
 * Hooks for the irqsoff/preemptoff latency tracers to pause and resume
 * critical-section timing measurement.
 */
#if defined(CONFIG_IRQSOFF_TRACER) || \
	defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void);
extern void start_critical_timings(void);
#else
# define stop_critical_timings() do { } while (0)
# define start_critical_timings() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_IRQFLAGS
extern void warn_bogus_irq_restore(void);
/*
 * Warn when flags are about to be restored while interrupts are still
 * enabled: restoring flags is only valid with interrupts disabled.
 */
#define raw_check_bogus_irq_restore()			\
	do {						\
		if (unlikely(!arch_irqs_disabled()))	\
			warn_bogus_irq_restore();	\
	} while (0)
#else
#define raw_check_bogus_irq_restore() do { } while (0)
#endif
/*
 * Wrap the arch provided IRQ routines to provide appropriate checks.
 * typecheck() enforces that 'flags' is an unsigned long lvalue, since
 * these macros cannot rely on normal function-argument type checking.
 */
#define raw_local_irq_disable()		arch_local_irq_disable()
#define raw_local_irq_enable()		arch_local_irq_enable()
#define raw_local_irq_save(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_irq_save();		\
	} while (0)
#define raw_local_irq_restore(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		raw_check_bogus_irq_restore();		\
		arch_local_irq_restore(flags);		\
	} while (0)
#define raw_local_save_flags(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_save_flags();	\
	} while (0)
#define raw_irqs_disabled_flags(flags)			\
	({						\
		typecheck(unsigned long, flags);	\
		arch_irqs_disabled_flags(flags);	\
	})
#define raw_irqs_disabled()		(arch_irqs_disabled())
#define raw_safe_halt()			arch_safe_halt()
/*
 * The local_irq_*() APIs are equal to the raw_local_irq*()
 * if !TRACE_IRQFLAGS.
 */
#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Tracing variants: notify the irq-tracing/lockdep machinery around the
 * raw state change so the tracked hardirqs_enabled state stays in sync
 * with reality.
 */
#define local_irq_enable()			\
	do {					\
		trace_hardirqs_on();		\
		raw_local_irq_enable();		\
	} while (0)

/*
 * Only report the OFF transition when interrupts were actually enabled
 * beforehand; a redundant disable must not generate a spurious event.
 */
#define local_irq_disable()				\
	do {						\
		bool was_disabled = raw_irqs_disabled();\
		raw_local_irq_disable();		\
		if (!was_disabled)			\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_off();		\
	} while (0)

/*
 * Report the ON transition before re-enabling, but only when the saved
 * flags say interrupts will actually become enabled again.
 */
#define local_irq_restore(flags)			\
	do {						\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_on();		\
		raw_local_irq_restore(flags);		\
	} while (0)

/* safe_halt() enables interrupts, so mark hardirqs on beforehand. */
#define safe_halt()				\
	do {					\
		trace_hardirqs_on();		\
		raw_safe_halt();		\
	} while (0)


#else /* !CONFIG_TRACE_IRQFLAGS */

#define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
#define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt()		do { raw_safe_halt(); } while (0)

#endif /* CONFIG_TRACE_IRQFLAGS */
#define local_save_flags(flags)	raw_local_save_flags(flags)

/*
 * Some architectures don't define arch_irqs_disabled(), so even if either
 * definition would be fine we need to use different ones for the time being
 * to avoid build issues.
 */
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled()					\
	({						\
		unsigned long _flags;			\
		raw_local_save_flags(_flags);		\
		raw_irqs_disabled_flags(_flags);	\
	})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled()	raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)

/*
 * Scoped guards (see linux/cleanup.h): interrupts are disabled for the
 * lifetime of the guard; the irqsave variant saves and restores the
 * previous flags state.
 */
DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
DEFINE_LOCK_GUARD_0(irqsave,
		    local_irq_save(_T->flags),
		    local_irq_restore(_T->flags),
		    unsigned long flags)

#endif