mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'for-next/misc' into for-next/core
* for-next/misc:
  arm64: hibernate: Fix warning for cast from restricted gfp_t
  arm64: esr: Define ESR_ELx_EC_* constants as UL
  arm64: Constify struct kobj_type
  arm64: smp: smp_send_stop() and crash_smp_send_stop() should try non-NMI first
  arm64/sve: Remove unused declaration read_smcr_features()
  arm64: mm: Remove unused declaration early_io_map()
  arm64: el2_setup.h: Rename some labels to be more diff-friendly
  arm64: signal: Fix some under-bracketed UAPI macros
  arm64/mm: Drop TCR_SMP_FLAGS
  arm64/mm: Drop PMD_SECT_VALID
commit f661eb5f8d
@@ -165,42 +165,45 @@
 	mrs	x1, id_aa64dfr0_el1
 	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cmp	x1, #3
-	b.lt	.Lset_debug_fgt_\@
+	b.lt	.Lskip_spe_fgt_\@
 	/* Disable PMSNEVFR_EL1 read and write traps */
 	orr	x0, x0, #(1 << 62)
 
-.Lset_debug_fgt_\@:
+.Lskip_spe_fgt_\@:
 	msr_s	SYS_HDFGRTR_EL2, x0
 	msr_s	SYS_HDFGWTR_EL2, x0
 
 	mov	x0, xzr
 	mrs	x1, id_aa64pfr1_el1
 	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
-	cbz	x1, .Lset_pie_fgt_\@
+	cbz	x1, .Lskip_debug_fgt_\@
 
 	/* Disable nVHE traps of TPIDR2 and SMPRI */
 	orr	x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
 	orr	x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
 
-.Lset_pie_fgt_\@:
+.Lskip_debug_fgt_\@:
 	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
 	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
-	cbz	x1, .Lset_fgt_\@
+	cbz	x1, .Lskip_pie_fgt_\@
 
 	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
 	orr	x0, x0, #HFGxTR_EL2_nPIR_EL1
 	orr	x0, x0, #HFGxTR_EL2_nPIRE0_EL1
 
-.Lset_fgt_\@:
+.Lskip_pie_fgt_\@:
 	msr_s	SYS_HFGRTR_EL2, x0
 	msr_s	SYS_HFGWTR_EL2, x0
 	msr_s	SYS_HFGITR_EL2, xzr
 
 	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
 	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
-	cbz	x1, .Lskip_fgt_\@
+	cbz	x1, .Lskip_amu_fgt_\@
 
 	msr_s	SYS_HAFGRTR_EL2, xzr
+
+.Lskip_amu_fgt_\@:
+
 .Lskip_fgt_\@:
 	.endm
@@ -10,63 +10,63 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>
 
-#define ESR_ELx_EC_UNKNOWN	(0x00)
-#define ESR_ELx_EC_WFx		(0x01)
+#define ESR_ELx_EC_UNKNOWN	UL(0x00)
+#define ESR_ELx_EC_WFx		UL(0x01)
 /* Unallocated EC: 0x02 */
-#define ESR_ELx_EC_CP15_32	(0x03)
-#define ESR_ELx_EC_CP15_64	(0x04)
-#define ESR_ELx_EC_CP14_MR	(0x05)
-#define ESR_ELx_EC_CP14_LS	(0x06)
-#define ESR_ELx_EC_FP_ASIMD	(0x07)
-#define ESR_ELx_EC_CP10_ID	(0x08)	/* EL2 only */
-#define ESR_ELx_EC_PAC		(0x09)	/* EL2 and above */
+#define ESR_ELx_EC_CP15_32	UL(0x03)
+#define ESR_ELx_EC_CP15_64	UL(0x04)
+#define ESR_ELx_EC_CP14_MR	UL(0x05)
+#define ESR_ELx_EC_CP14_LS	UL(0x06)
+#define ESR_ELx_EC_FP_ASIMD	UL(0x07)
+#define ESR_ELx_EC_CP10_ID	UL(0x08)	/* EL2 only */
+#define ESR_ELx_EC_PAC		UL(0x09)	/* EL2 and above */
 /* Unallocated EC: 0x0A - 0x0B */
-#define ESR_ELx_EC_CP14_64	(0x0C)
-#define ESR_ELx_EC_BTI		(0x0D)
-#define ESR_ELx_EC_ILL		(0x0E)
+#define ESR_ELx_EC_CP14_64	UL(0x0C)
+#define ESR_ELx_EC_BTI		UL(0x0D)
+#define ESR_ELx_EC_ILL		UL(0x0E)
 /* Unallocated EC: 0x0F - 0x10 */
-#define ESR_ELx_EC_SVC32	(0x11)
-#define ESR_ELx_EC_HVC32	(0x12)	/* EL2 only */
-#define ESR_ELx_EC_SMC32	(0x13)	/* EL2 and above */
+#define ESR_ELx_EC_SVC32	UL(0x11)
+#define ESR_ELx_EC_HVC32	UL(0x12)	/* EL2 only */
+#define ESR_ELx_EC_SMC32	UL(0x13)	/* EL2 and above */
 /* Unallocated EC: 0x14 */
-#define ESR_ELx_EC_SVC64	(0x15)
-#define ESR_ELx_EC_HVC64	(0x16)	/* EL2 and above */
-#define ESR_ELx_EC_SMC64	(0x17)	/* EL2 and above */
-#define ESR_ELx_EC_SYS64	(0x18)
-#define ESR_ELx_EC_SVE		(0x19)
-#define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
+#define ESR_ELx_EC_SVC64	UL(0x15)
+#define ESR_ELx_EC_HVC64	UL(0x16)	/* EL2 and above */
+#define ESR_ELx_EC_SMC64	UL(0x17)	/* EL2 and above */
+#define ESR_ELx_EC_SYS64	UL(0x18)
+#define ESR_ELx_EC_SVE		UL(0x19)
+#define ESR_ELx_EC_ERET		UL(0x1a)	/* EL2 only */
 /* Unallocated EC: 0x1B */
-#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
-#define ESR_ELx_EC_SME		(0x1D)
+#define ESR_ELx_EC_FPAC		UL(0x1C)	/* EL1 and above */
+#define ESR_ELx_EC_SME		UL(0x1D)
 /* Unallocated EC: 0x1E */
-#define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
-#define ESR_ELx_EC_IABT_LOW	(0x20)
-#define ESR_ELx_EC_IABT_CUR	(0x21)
-#define ESR_ELx_EC_PC_ALIGN	(0x22)
+#define ESR_ELx_EC_IMP_DEF	UL(0x1f)	/* EL3 only */
+#define ESR_ELx_EC_IABT_LOW	UL(0x20)
+#define ESR_ELx_EC_IABT_CUR	UL(0x21)
+#define ESR_ELx_EC_PC_ALIGN	UL(0x22)
 /* Unallocated EC: 0x23 */
-#define ESR_ELx_EC_DABT_LOW	(0x24)
-#define ESR_ELx_EC_DABT_CUR	(0x25)
-#define ESR_ELx_EC_SP_ALIGN	(0x26)
-#define ESR_ELx_EC_MOPS		(0x27)
-#define ESR_ELx_EC_FP_EXC32	(0x28)
+#define ESR_ELx_EC_DABT_LOW	UL(0x24)
+#define ESR_ELx_EC_DABT_CUR	UL(0x25)
+#define ESR_ELx_EC_SP_ALIGN	UL(0x26)
+#define ESR_ELx_EC_MOPS		UL(0x27)
+#define ESR_ELx_EC_FP_EXC32	UL(0x28)
 /* Unallocated EC: 0x29 - 0x2B */
-#define ESR_ELx_EC_FP_EXC64	(0x2C)
+#define ESR_ELx_EC_FP_EXC64	UL(0x2C)
 /* Unallocated EC: 0x2D - 0x2E */
-#define ESR_ELx_EC_SERROR	(0x2F)
-#define ESR_ELx_EC_BREAKPT_LOW	(0x30)
-#define ESR_ELx_EC_BREAKPT_CUR	(0x31)
-#define ESR_ELx_EC_SOFTSTP_LOW	(0x32)
-#define ESR_ELx_EC_SOFTSTP_CUR	(0x33)
-#define ESR_ELx_EC_WATCHPT_LOW	(0x34)
-#define ESR_ELx_EC_WATCHPT_CUR	(0x35)
+#define ESR_ELx_EC_SERROR	UL(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW	UL(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR	UL(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW	UL(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR	UL(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW	UL(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR	UL(0x35)
 /* Unallocated EC: 0x36 - 0x37 */
-#define ESR_ELx_EC_BKPT32	(0x38)
+#define ESR_ELx_EC_BKPT32	UL(0x38)
 /* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32	(0x3A)	/* EL2 only */
+#define ESR_ELx_EC_VECTOR32	UL(0x3A)	/* EL2 only */
 /* Unallocated EC: 0x3B */
-#define ESR_ELx_EC_BRK64	(0x3C)
+#define ESR_ELx_EC_BRK64	UL(0x3C)
 /* Unallocated EC: 0x3D - 0x3F */
-#define ESR_ELx_EC_MAX		(0x3F)
+#define ESR_ELx_EC_MAX		UL(0x3F)
 
 #define ESR_ELx_EC_SHIFT	(26)
 #define ESR_ELx_EC_WIDTH	(6)
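The switch to UL() above is easiest to see with a tiny stand-alone program: shifting a plain int EC constant up to the EC field position (bit 26) overflows a 32-bit int and, in practice, sign-extends once widened to 64 bits, while the UL() form shifts as unsigned long. The UL() macro and the EC value below are local stand-ins that only mirror the header; this is an illustration, not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

#define UL(x)			(x##UL)		/* stand-in for the kernel's UL() */
#define ESR_ELx_EC_SHIFT	26

#define EC_BRK64_PLAIN		(0x3C)		/* plain int constant (old style) */
#define EC_BRK64_UL		UL(0x3C)	/* unsigned long constant (new style) */

int main(void)
{
	/*
	 * 0x3C << 26 overflows a 32-bit int (undefined behaviour); typically it
	 * produces a negative value that sign-extends when widened to 64 bits.
	 * The UL() version shifts as unsigned long and stays a clean field value.
	 */
	uint64_t bad  = (uint64_t)(EC_BRK64_PLAIN << ESR_ELx_EC_SHIFT);
	uint64_t good = EC_BRK64_UL << ESR_ELx_EC_SHIFT;

	printf("plain int: %#018llx\n", (unsigned long long)bad);
	printf("UL():      %#018llx\n", (unsigned long long)good);
	return 0;
}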
@@ -155,8 +155,6 @@ extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
 
-extern u64 read_smcr_features(void);
-
 /*
  * Helpers to translate bit indices in sve_vq_map to VQ values (and
  * vice versa). This allows find_next_bit() to be used to find the
@@ -63,7 +63,6 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
-extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
@@ -135,7 +135,6 @@
 /*
  * Section
  */
-#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
 #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
@@ -320,10 +320,10 @@ struct zt_context {
		((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1))	\
			/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
 
 #define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
-				  (SVE_SIG_ZREG_SIZE(vq) * n))
+				  (SVE_SIG_ZREG_SIZE(vq) * (n)))
 
 #define ZA_SIG_CONTEXT_SIZE(vq) \
		(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
@@ -334,7 +334,7 @@ struct zt_context {
 
 #define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
 
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
 
 #define ZT_SIG_CONTEXT_SIZE(n) \
	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
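The two sigcontext.h hunks above only add parentheses around macro arguments. A minimal stand-alone sketch (with made-up local macro names, not the UAPI header) shows why the extra brackets matter as soon as the argument is an expression rather than a single token:

#include <stdio.h>

#define REG_BYTES			64

/* Old style: the argument is not parenthesised. */
#define REGS_SIZE_UNBRACKETED(n)	(REG_BYTES * n)
/* Fixed style: (n) keeps operator precedence from splitting the argument. */
#define REGS_SIZE_BRACKETED(n)		(REG_BYTES * (n))

int main(void)
{
	/* Passing "2 + 1" shows the difference. */
	printf("unbracketed: %d\n", REGS_SIZE_UNBRACKETED(2 + 1));	/* 64*2 + 1 = 129 */
	printf("bracketed:   %d\n", REGS_SIZE_BRACKETED(2 + 1));	/* 64*(2+1) = 192 */
	return 0;
}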
@@ -280,7 +280,7 @@ const struct seq_operations cpuinfo_op = {
 };
 
 
-static struct kobj_type cpuregs_kobj_type = {
+static const struct kobj_type cpuregs_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
 };
 
@@ -407,7 +407,7 @@ int swsusp_arch_resume(void)
				void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
-		.trans_alloc_arg	= (void *)GFP_ATOMIC,
+		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};
 
	/*
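The hibernate.c hunk adds __force because gfp_t is a sparse __bitwise type, so casting it to an unrelated plain type otherwise triggers the "cast from restricted gfp_t" warning named in the commit. Below is a stand-alone sketch of that mechanism, assuming only sparse's documented __bitwise/__force attributes; my_gfp_t and MY_GFP_ATOMIC are invented stand-ins, and the cast is routed through unsigned long purely so the user-space program compiles cleanly (the kernel line casts straight to void *).

#include <stdio.h>

#ifdef __CHECKER__			/* defined when sparse analyses the file */
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise my_gfp_t;	/* stand-in for gfp_t */

#define MY_GFP_ATOMIC	((__force my_gfp_t)0x820u)	/* invented flag value */

int main(void)
{
	/*
	 * Smuggling the flags through an opaque pointer argument requires a
	 * cast out of the restricted type. Without __force, sparse reports
	 * "cast from restricted my_gfp_t"; __force documents that the cast
	 * is intentional and silences the warning.
	 */
	void *opaque = (void *)(__force unsigned long)MY_GFP_ATOMIC;
	my_gfp_t flags = (__force my_gfp_t)(unsigned long)opaque;

	printf("flags = %#x\n", (__force unsigned int)flags);
	return 0;
}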
@@ -68,7 +68,7 @@ enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
-	IPI_CPU_CRASH_STOP,
+	IPI_CPU_STOP_NMI,
	IPI_TIMER,
	IPI_IRQ_WORK,
	NR_IPI,
@@ -85,6 +85,8 @@ static int ipi_irq_base __ro_after_init;
 static int nr_ipi __ro_after_init = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
 
+static bool crash_stop;
+
 static void ipi_setup(int cpu);
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -821,7 +823,7 @@ static const char *ipi_types[MAX_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
-	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
+	[IPI_CPU_STOP_NMI]	= "CPU stop NMIs",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
@@ -865,9 +867,9 @@ void arch_irq_work_raise(void)
 }
 #endif
 
-static void __noreturn local_cpu_stop(void)
+static void __noreturn local_cpu_stop(unsigned int cpu)
 {
-	set_cpu_online(smp_processor_id(), false);
+	set_cpu_online(cpu, false);
 
	local_daif_mask();
	sdei_mask_local_cpu();
@@ -881,21 +883,26 @@ static void __noreturn local_cpu_stop(void)
  */
 void __noreturn panic_smp_self_stop(void)
 {
-	local_cpu_stop();
+	local_cpu_stop(smp_processor_id());
 }
 
-#ifdef CONFIG_KEXEC_CORE
-static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
-#endif
-
 static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 {
 #ifdef CONFIG_KEXEC_CORE
+	/*
+	 * Use local_daif_mask() instead of local_irq_disable() to make sure
+	 * that pseudo-NMIs are disabled. The "crash stop" code starts with
+	 * an IRQ and falls back to NMI (which might be pseudo). If the IRQ
+	 * finally goes through right as we're timing out then the NMI could
+	 * interrupt us. It's better to prevent the NMI and let the IRQ
+	 * finish since the pt_regs will be better.
+	 */
+	local_daif_mask();
+
	crash_save_cpu(regs, cpu);
 
-	atomic_dec(&waiting_for_crash_ipi);
+	set_cpu_online(cpu, false);
 
-	local_irq_disable();
	sdei_mask_local_cpu();
 
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -960,14 +967,12 @@ static void do_handle_IPI(int ipinr)
		break;
 
	case IPI_CPU_STOP:
-		local_cpu_stop();
-		break;
-
-	case IPI_CPU_CRASH_STOP:
-		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
+	case IPI_CPU_STOP_NMI:
+		if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());
 
			unreachable();
+		} else {
+			local_cpu_stop(cpu);
		}
		break;
 
@@ -1022,8 +1027,7 @@ static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
		return false;
 
	switch (ipi) {
-	case IPI_CPU_STOP:
-	case IPI_CPU_CRASH_STOP:
+	case IPI_CPU_STOP_NMI:
	case IPI_CPU_BACKTRACE:
	case IPI_KGDB_ROUNDUP:
		return true;
@@ -1136,47 +1140,10 @@ static inline unsigned int num_other_online_cpus(void)
 
 void smp_send_stop(void)
 {
-	unsigned long timeout;
-
-	if (num_other_online_cpus()) {
-		cpumask_t mask;
-
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		if (system_state <= SYSTEM_RUNNING)
-			pr_crit("SMP: stopping secondary CPUs\n");
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
-
-	/* Wait up to one second for other CPUs to stop */
-	timeout = USEC_PER_SEC;
-	while (num_other_online_cpus() && timeout--)
-		udelay(1);
-
-	if (num_other_online_cpus())
-		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
-			cpumask_pr_args(cpu_online_mask));
-
-	sdei_mask_local_cpu();
-}
-
-#ifdef CONFIG_KEXEC_CORE
-void crash_smp_send_stop(void)
-{
-	static int cpus_stopped;
+	static unsigned long stop_in_progress;
	cpumask_t mask;
	unsigned long timeout;
 
	/*
-	 * This function can be called twice in panic path, but obviously
-	 * we execute this only once.
-	 */
-	if (cpus_stopped)
-		return;
-
-	cpus_stopped = 1;
-
-	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
@@ -1184,31 +1151,98 @@ void crash_smp_send_stop(void)
	if (num_other_online_cpus() == 0)
		goto skip_ipi;
 
+	/* Only proceed if this is the first CPU to reach this code */
+	if (test_and_set_bit(0, &stop_in_progress))
+		return;
+
+	/*
+	 * Send an IPI to all currently online CPUs except the CPU running
+	 * this code.
+	 *
+	 * NOTE: we don't do anything here to prevent other CPUs from coming
+	 * online after we snapshot `cpu_online_mask`. Ideally, the calling code
+	 * should do something to prevent other CPUs from coming up. This code
+	 * can be called in the panic path and thus it doesn't seem wise to
+	 * grab the CPU hotplug mutex ourselves. Worst case:
+	 * - If a CPU comes online as we're running, we'll likely notice it
+	 *   during the 1 second wait below and then we'll catch it when we try
+	 *   with an NMI (assuming NMIs are enabled) since we re-snapshot the
+	 *   mask before sending an NMI.
+	 * - If we leave the function and see that CPUs are still online we'll
+	 *   at least print a warning. Especially without NMIs this function
+	 *   isn't foolproof anyway so calling code will just have to accept
+	 *   the fact that there could be cases where a CPU can't be stopped.
+	 */
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
 
-	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
+	if (system_state <= SYSTEM_RUNNING)
+		pr_crit("SMP: stopping secondary CPUs\n");
 
-	pr_crit("SMP: stopping secondary CPUs\n");
-	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
-
-	/* Wait up to one second for other CPUs to stop */
+	/*
+	 * Start with a normal IPI and wait up to one second for other CPUs to
+	 * stop. We do this first because it gives other processors a chance
+	 * to exit critical sections / drop locks and makes the rest of the
+	 * stop process (especially console flush) more robust.
+	 */
+	smp_cross_call(&mask, IPI_CPU_STOP);
	timeout = USEC_PER_SEC;
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+	while (num_other_online_cpus() && timeout--)
		udelay(1);
 
-	if (atomic_read(&waiting_for_crash_ipi) > 0)
+	/*
+	 * If CPUs are still online, try an NMI. There's no excuse for this to
+	 * be slow, so we only give them an extra 10 ms to respond.
+	 */
+	if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) {
+		smp_rmb();
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
+		pr_info("SMP: retry stop with NMI for CPUs %*pbl\n",
+			cpumask_pr_args(&mask));
+
+		smp_cross_call(&mask, IPI_CPU_STOP_NMI);
+		timeout = USEC_PER_MSEC * 10;
+		while (num_other_online_cpus() && timeout--)
+			udelay(1);
+	}
+
+	if (num_other_online_cpus()) {
+		smp_rmb();
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
+	}
 
 skip_ipi:
	sdei_mask_local_cpu();
 }
 
+#ifdef CONFIG_KEXEC_CORE
+void crash_smp_send_stop(void)
+{
+	/*
+	 * This function can be called twice in panic path, but obviously
+	 * we execute this only once.
+	 *
+	 * We use this same boolean to tell whether the IPI we send was a
+	 * stop or a "crash stop".
+	 */
+	if (crash_stop)
+		return;
+	crash_stop = 1;
+
+	smp_send_stop();
+
+	sdei_handler_abort();
+}
+
 bool smp_crash_stop_failed(void)
 {
-	return (atomic_read(&waiting_for_crash_ipi) > 0);
+	return num_other_online_cpus() != 0;
 }
 #endif
 
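The smp.c rework above boils down to a two-phase stop: send the ordinary stop IPI first so other CPUs get a chance to drop locks and flush state, and only fall back to an NMI for CPUs that do not respond within the timeout. A rough user-space analogue of that policy, using a cooperative flag first and pthread_cancel() as the "big hammer" afterwards; all names and timeouts below are illustrative, and none of this is kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool please_stop;
static atomic_bool worker_done;

static void *worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&please_stop))
		usleep(1000);		/* cooperative path: poll the stop flag */
	atomic_store(&worker_done, true);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int timeout;

	pthread_create(&tid, NULL, worker, NULL);

	/* Phase 1: polite request, wait up to ~1 second (cf. IPI_CPU_STOP). */
	atomic_store(&please_stop, true);
	for (timeout = 1000; !atomic_load(&worker_done) && timeout--; )
		usleep(1000);

	/* Phase 2: escalate only if the worker ignored us (cf. the NMI retry). */
	if (!atomic_load(&worker_done)) {
		fprintf(stderr, "worker ignored the request, cancelling\n");
		pthread_cancel(tid);	/* usleep() is a cancellation point */
	}

	pthread_join(tid, NULL);
	printf("worker %s politely\n",
	       atomic_load(&worker_done) ? "stopped" : "did not stop");
	return 0;
}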
@@ -36,8 +36,6 @@
 #define TCR_KASLR_FLAGS	0
 #endif
 
-#define TCR_SMP_FLAGS	TCR_SHARED
-
 /* PTWs cacheable, inner/outer WBWA */
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
 
@@ -469,7 +467,7 @@ SYM_FUNC_START(__cpu_setup)
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
-			TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+			TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
 
	tcr_clear_errata_bits tcr, x9, x5