From 68cf617309b5f6f3a651165f49f20af1494753ae Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 8 Jul 2020 17:25:46 +0100
Subject: [PATCH 01/13] KVM: arm64: Fix definition of PAGE_HYP_DEVICE

PAGE_HYP_DEVICE is intended to encode attribute bits for an EL2 stage-1
pte mapping a device. Unfortunately, it includes PROT_DEVICE_nGnRE,
which encodes attributes for EL1 stage-1 mappings such as UXN and nG
(both RES0 for EL2), as well as DBM, which is meaningless as TCR_EL2.HD
is not set.

Fix the definition of PAGE_HYP_DEVICE so that it doesn't set RES0 bits
at EL2.

Acked-by: Marc Zyngier
Cc: Marc Zyngier
Cc: Catalin Marinas
Cc: James Morse
Link: https://lore.kernel.org/r/20200708162546.26176-1-will@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable-prot.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2e7e0f452301..4d867c6446c4 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE		__pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
 
 #define PAGE_S2_MEMATTR(attr)						\
 	({								\

From c377e67c6271954969384f9be1b1b71de13eba30 Mon Sep 17 00:00:00 2001
From: Gavin Shan
Date: Tue, 30 Jun 2020 17:52:27 +1000
Subject: [PATCH 02/13] drivers/firmware/psci: Fix memory leak in
 alloc_init_cpu_groups()

The CPU mask (@tmp) should be released when allocating @cpu_groups or
any of its elements fails. Otherwise it is leaked, since the CPU mask
is dynamically allocated when CONFIG_CPUMASK_OFFSTACK is enabled.

Signed-off-by: Gavin Shan
Reviewed-by: Sudeep Holla
Link: https://lore.kernel.org/r/20200630075227.199624-1-gshan@redhat.com
Signed-off-by: Will Deacon
---
 drivers/firmware/psci/psci_checker.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 873841af8d57..d9b1a2d71223 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 
 	cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
 			     GFP_KERNEL);
-	if (!cpu_groups)
+	if (!cpu_groups) {
+		free_cpumask_var(tmp);
 		return -ENOMEM;
+	}
 
 	cpumask_copy(tmp, cpu_online_mask);
 
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 			topology_core_cpumask(cpumask_any(tmp));
 
 		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+			free_cpumask_var(tmp);
 			free_cpu_groups(num_groups, &cpu_groups);
 			return -ENOMEM;
 		}
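The fix above keeps a per-branch free_cpumask_var() call on each error
path. For comparison, the same invariant -- @tmp is released on every
exit -- can be expressed with a single unwind label. A minimal sketch of
that alternative shape (kept close to psci_checker.c, but simplified and
not the committed code):

/*
 * Sketch: centralised cleanup instead of per-branch free_cpumask_var().
 * Returns the number of groups on success, -ENOMEM on failure.
 */
static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int err = -ENOMEM;
	int num_groups = 0;
	cpumask_var_t tmp;
	cpumask_var_t *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups)
		goto out_free_tmp;

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpu_groups(num_groups, &cpu_groups);
			goto out_free_tmp;
		}

		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	*pcpu_groups = cpu_groups;
	err = num_groups;

out_free_tmp:
	/* Single exit: @tmp is freed on success and on every failure. */
	free_cpumask_var(tmp);
	return err;
}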
From 132330f8044c8e0cfa83b5eee41ade52708390dc Mon Sep 17 00:00:00 2001
From: Gavin Shan
Date: Tue, 30 Jun 2020 17:59:43 +1000
Subject: [PATCH 03/13] drivers/firmware/psci: Assign @err directly in
 hotplug_tests()

The return value of down_and_up_cpus() can be assigned to @err directly.
With that, the pointless zero-initialisation of @err can be dropped.

Signed-off-by: Gavin Shan
Reviewed-by: Sudeep Holla
Link: https://lore.kernel.org/r/20200630075943.203954-1-gshan@redhat.com
Signed-off-by: Will Deacon
---
 drivers/firmware/psci/psci_checker.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index d9b1a2d71223..3d6ba425dbb9 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -199,13 +199,12 @@ static int hotplug_tests(void)
 	if (!page_buf)
 		goto out_free_cpu_groups;
 
-	err = 0;
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
 	 * refuse doing that.
 	 */
 	pr_info("Trying to turn off and on again all CPUs\n");
-	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+	err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
 
 	/*
 	 * Take down CPUs by cpu group this time. When the last CPU is turned

From b8c1c9fe6a042dfbb169d14ab2000d9163f06d10 Mon Sep 17 00:00:00 2001
From: Kevin Hao
Date: Fri, 17 Apr 2020 18:32:11 +0800
Subject: [PATCH 04/13] arm64: entry: Fix the typo in the comment of el1_dbg()

The function name should be local_daif_mask().

Signed-off-by: Kevin Hao
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20200417103212.45812-2-haokexin@gmail.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry-common.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3dbdf9752b11..d3be9dbf5490 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
 	/*
 	 * The CPU masked interrupts, and we are leaving them masked during
 	 * do_debug_exception(). Update PMR as if we had called
-	 * local_mask_daif().
+	 * local_daif_mask().
 	 */
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
From 97884ca8c2925d14c32188e865069f21378b4b4f Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 6 Jul 2020 17:37:59 +0100
Subject: [PATCH 05/13] arm64: Introduce a way to disable the 32bit vdso

We have a class of errata (grouped under the ARM64_WORKAROUND_1418040
banner) that force the trapping of counter access from 32bit EL0.

We would normally disable the whole vdso for such a defect, except that
it would disable it for 64bit userspace as well, which is a shame.

Instead, add a new vdso_clock_mode, which signals that the vdso isn't
usable for compat tasks. This gets checked in the new
vdso_clocksource_ok() helper, now provided for the 32bit vdso.

Signed-off-by: Marc Zyngier
Acked-by: Mark Rutland
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200706163802.1836732-2-maz@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/vdso/clocksource.h         | 7 +++++--
 arch/arm64/include/asm/vdso/compat_gettimeofday.h | 8 +++++++-
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h
index df6ea65c1dec..b054d9febfb5 100644
--- a/arch/arm64/include/asm/vdso/clocksource.h
+++ b/arch/arm64/include/asm/vdso/clocksource.h
@@ -2,7 +2,10 @@
 #ifndef __ASM_VDSOCLOCKSOURCE_H
 #define __ASM_VDSOCLOCKSOURCE_H
 
-#define VDSO_ARCH_CLOCKMODES	\
-	VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES					\
+	/* vdso clocksource for both 32 and 64bit tasks */	\
+	VDSO_CLOCKMODE_ARCHTIMER,				\
+	/* vdso clocksource for 64bit tasks only */		\
+	VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
 
 #endif
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index b6907ae78e53..9a625e8947ff 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 	 * update. Return something. Core will do another round and then
 	 * see the mode change and fallback to the syscall.
 	 */
-	if (clock_mode == VDSO_CLOCKMODE_NONE)
+	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
 		return 0;
 
 	/*
@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 	return ret;
 }
 
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+	return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok	vdso_clocksource_ok
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
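For context, the consumer of vdso_clocksource_ok() is the generic
high-resolution read loop. A condensed paraphrase of do_hres() from
lib/vdso/gettimeofday.c (details elided, so not the verbatim kernel
source) shows where the new hook bites:

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	do {
		seq = vdso_read_begin(vd);

		/*
		 * New hook: a compat task on an affected core sees
		 * VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT and bails out here,
		 * making the caller fall back to the real syscall.
		 */
		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

Since 64bit tasks use the native gettimeofday.h, whose
vdso_clocksource_ok() still accepts both ARCHTIMER modes, only the
compat path is pushed to the syscall.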
From c1fbec4ac0d701f350a581941d35643d5a9cd184 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 6 Jul 2020 17:38:00 +0100
Subject: [PATCH 06/13] arm64: arch_timer: Allow a workaround descriptor to
 disable compat vdso

As we are about to disable the vdso for compat tasks in some
circumstances, let's allow a workaround descriptor to express exactly
that.

Signed-off-by: Marc Zyngier
Acked-by: Mark Rutland
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200706163802.1836732-3-maz@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/arch_timer.h  | 1 +
 drivers/clocksource/arm_arch_timer.c | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 7ae54d7d333a..9f0ec21d6327 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
 	u64 (*read_cntvct_el0)(void);
 	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
 	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+	bool disable_compat_vdso;
 };
 
 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index ecf7b7db2d05..a8e4fb429f52 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -566,6 +566,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
 	if (wa->read_cntvct_el0) {
 		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 		vdso_default = VDSO_CLOCKMODE_NONE;
+	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+		clocksource_counter.vdso_clock_mode = vdso_default;
 	}
 }

From 4b661d6133c5d3a7c9aca0b4ee5a78c7766eff3f Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 6 Jul 2020 17:38:01 +0100
Subject: [PATCH 07/13] arm64: arch_timer: Disable the compat vdso for cores
 affected by ARM64_WORKAROUND_1418040

ARM64_WORKAROUND_1418040 requires that AArch32 EL0 accesses to the
virtual counter register are trapped and emulated by the kernel. This
makes the vdso pretty pointless, and in some cases livelock-prone.

Provide a workaround entry that limits the vdso to 64bit tasks.

Signed-off-by: Marc Zyngier
Acked-by: Mark Rutland
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200706163802.1836732-4-maz@kernel.org
Signed-off-by: Will Deacon
---
 drivers/clocksource/arm_arch_timer.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a8e4fb429f52..6c3e84180146 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 		.set_next_event_virt = erratum_set_next_event_tval_virt,
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+	{
+		.match_type = ate_match_local_cap_id,
+		.id = (void *)ARM64_WORKAROUND_1418040,
+		.desc = "ARM erratum 1418040",
+		.disable_compat_vdso = true,
+	},
+#endif
 };
 
 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
From dc802f2bc0208f4abca420705a860c5175db4bee Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 6 Jul 2020 17:38:02 +0100
Subject: [PATCH 08/13] arm64: Rework ARM_ERRATUM_1418040 handling

The current handling of erratum 1418040 has the side effect that
cntkctl_el1 can get changed for both 32 and 64bit tasks.

This isn't a problem so far, but if we ever need to mitigate another
of these errata on the 64bit side, we'd better keep the messing with
cntkctl_el1 local to 32bit tasks.

For that, make sure that on entering the kernel from a 32bit task,
userspace access to cntvct gets enabled, and disabled when returning
to userspace, while it never gets changed for 64bit tasks.

Signed-off-by: Marc Zyngier
Reviewed-by: Mark Rutland
Link: https://lore.kernel.org/r/20200706163802.1836732-5-maz@kernel.org
[will: removed branch instructions per Mark's review comments]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry.S | 34 +++++++++++++++++++---------------
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5304d193c79d..9757a8d5fd94 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,6 +167,17 @@ alternative_cb_end
 	stp	x28, x29, [sp, #16 * 14]
 
 	.if	\el == 0
+	.if	\regsize == 32
+	// If we're returning from a 32-bit task on a system affected by
+	// 1418040 then re-enable userspace access to the virtual counter.
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+	mrs	x0, cntkctl_el1
+	orr	x0, x0, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
+	msr	cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+	.endif
 	clear_gp_regs
 	mrs	x21, sp_el0
 	ldr_this_cpu	tsk, __entry_task, x20
@@ -320,6 +331,14 @@ alternative_else_nop_endif
 	tst	x22, #PSR_MODE32_BIT		// native task?
 	b.eq	3f
 
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+	mrs	x0, cntkctl_el1
+	bic	x0, x0, #2		// ARCH_TIMER_USR_VCT_ACCESS_EN
+	msr	cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +350,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
-	b	4f
-alternative_else_nop_endif
-	/*
-	 * if (x22.mode32 == cntkctl_el1.el0vcten)
-	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
-	 */
-	mrs	x1, cntkctl_el1
-	eon	x0, x1, x22, lsr #3
-	tbz	x0, #1, 4f
-	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x1
-4:
-#endif
 	scs_save tsk, x0
 
 	/* No kernel C function calls after this as user keys are set. */
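In C terms, the new entry/exit hooks reduce to setting and clearing
CNTKCTL_EL1.EL0VCTEN around the time a 32-bit task runs in userspace. A
rough rendering of the assembly above, using hypothetical helper names
(cpus_have_cap_1418040(), read/write_cntkctl_el1() are illustrative,
not kernel APIs):

#define CNTKCTL_EL0VCTEN	(UL(1) << 1)	/* ARCH_TIMER_USR_VCT_ACCESS_EN */

/* What the kernel_entry hunk does, for exceptions taken from 32-bit EL0. */
static void erratum_1418040_entry_from_compat(void)
{
	if (!cpus_have_cap_1418040())		/* hypothetical helper */
		return;

	/*
	 * Leave EL0VCTEN set while in the kernel, so 64-bit tasks (which
	 * never touch cntkctl_el1 on this path) never see the trap.
	 */
	write_cntkctl_el1(read_cntkctl_el1() | CNTKCTL_EL0VCTEN);
}

/* What the kernel_exit hunk does, just before ERET to a 32-bit task. */
static void erratum_1418040_return_to_compat(void)
{
	if (!cpus_have_cap_1418040())		/* hypothetical helper */
		return;

	/* Trap EL0 cntvct reads so the kernel can emulate them. */
	write_cntkctl_el1(read_cntkctl_el1() & ~CNTKCTL_EL0VCTEN);
}

The old code instead toggled the bit on exit based on comparing the
task's mode with the current EL0VCTEN value, which meant the register
could also be rewritten when switching to and from 64bit tasks.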
From 8c3001b9252d8dbf72289d3590a723eea8cfe824 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 8 Jul 2020 22:10:01 +0100
Subject: [PATCH 09/13] arm64: entry: Tidy up block comments and label numbers

Continually butchering our entry code with CPU errata workarounds has
led to it looking a little scruffy. Consistently use /* */ comments for
multi-line block comments and ensure that small numeric labels use
consecutive integers.

No functional change, but the state of things was irritating.

Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry.S | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9757a8d5fd94..35de8ba60e3d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -126,8 +126,10 @@ alternative_else_nop_endif
 	add	\dst, \dst, #(\sym - .entry.tramp.text)
 	.endm
 
-	// This macro corrupts x0-x3. It is the caller's duty
-	// to save/restore them if required.
+	/*
+	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
+	 * them if required.
+	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
 alternative_cb	arm64_enable_wa2_handling
@@ -168,8 +170,10 @@ alternative_cb_end
 
 	.if	\el == 0
 	.if	\regsize == 32
-	// If we're returning from a 32-bit task on a system affected by
-	// 1418040 then re-enable userspace access to the virtual counter.
+	/*
+	 * If we're returning from a 32-bit task on a system affected by
+	 * 1418040 then re-enable userspace access to the virtual counter.
+	 */
 #ifdef CONFIG_ARM64_ERRATUM_1418040
 alternative_if ARM64_WORKAROUND_1418040
 	mrs	x0, cntkctl_el1
@@ -183,8 +187,10 @@ alternative_else_nop_endif
 	ldr_this_cpu	tsk, __entry_task, x20
 	msr	sp_el0, tsk
 
-	// Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
-	// when scheduling.
+	/*
+	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+	 * when scheduling.
+	 */
 	ldr	x19, [tsk, #TSK_TI_FLAGS]
 	disable_step_tsk x19, x20
 
@@ -381,11 +387,11 @@ alternative_else_nop_endif
 	.if	\el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-	bne	5f
+	bne	4f
 	msr	far_el1, x30
 	tramp_alias	x30, tramp_exit_native
 	br	x30
-5:
+4:
 	tramp_alias	x30, tramp_exit_compat
 	br	x30
 #endif

From 8523c006264df65aac7d77284cc69aac46a6f842 Mon Sep 17 00:00:00 2001
From: Wei Li
Date: Sun, 10 May 2020 05:41:56 +0800
Subject: [PATCH 10/13] arm64: kgdb: Fix single-step exception handling oops

After entering kdb due to a breakpoint, executing 'ss' or 'go' (which
delays installing breakpoints and single-steps first) does not work
correctly: we end up back in kdb because of an oops.

This is because the reason obtained in kdb_stub() is not the expected
one: the ex_vector for single-step should be 0, as powerpc, sh and
parisc already implement.

Before the patch:

Entering kdb (current=0xffff8000119e2dc0, pid 0) on processor 0 due to
Keyboard Entry
[0]kdb> bp printk
Instruction(i) BP #0 at 0xffff8000101486cc (printk)
    is enabled   addr at ffff8000101486cc, hardtype=0 installed=0
[0]kdb> g
/ # echo h > /proc/sysrq-trigger
Entering kdb (current=0xffff0000fa878040, pid 266) on processor 3 due to
Breakpoint @ 0xffff8000101486cc
[3]kdb> ss
Entering kdb (current=0xffff0000fa878040, pid 266) on processor 3
Oops: (null)
due to oops @ 0xffff800010082ab8
CPU: 3 PID: 266 Comm: sh Not tainted 5.7.0-rc4-13839-gf0e5ad491718 #6
Hardware name: linux,dummy-virt (DT)
pstate: 00000085 (nzcv daIf -PAN -UAO)
pc : el1_irq+0x78/0x180
lr : __handle_sysrq+0x80/0x190
sp : ffff800015003bf0
x29: ffff800015003d20 x28: ffff0000fa878040
x27: 0000000000000000 x26: ffff80001126b1f0
x25: ffff800011b6a0d8 x24: 0000000000000000
x23: 0000000080200005 x22: ffff8000101486cc
x21: ffff800015003d30 x20: 0000ffffffffffff
x19: ffff8000119f2000 x18: 0000000000000000
x17: 0000000000000000 x16: 0000000000000000
x15: 0000000000000000 x14: 0000000000000000
x13: 0000000000000000 x12: 0000000000000000
x11: 0000000000000000 x10: 0000000000000000
x9 : 0000000000000000 x8 : ffff800015003e50
x7 : 0000000000000002 x6 : 00000000380b9990
x5 : ffff8000106e99e8 x4 : ffff0000fadd83c0
x3 : 0000ffffffffffff x2 : ffff800011b6a0d8
x1 : ffff800011b6a000 x0 : ffff80001130c9d8
Call trace:
 el1_irq+0x78/0x180
 printk+0x0/0x84
 write_sysrq_trigger+0xb0/0x118
 proc_reg_write+0xb4/0xe0
 __vfs_write+0x18/0x40
 vfs_write+0xb0/0x1b8
 ksys_write+0x64/0xf0
 __arm64_sys_write+0x14/0x20
 el0_svc_common.constprop.2+0xb0/0x168
 do_el0_svc+0x20/0x98
 el0_sync_handler+0xec/0x1a8
 el0_sync+0x140/0x180
[3]kdb>

After the patch:

Entering kdb (current=0xffff8000119e2dc0, pid 0) on processor 0 due to
Keyboard Entry
[0]kdb> bp printk
Instruction(i) BP #0 at 0xffff8000101486cc (printk)
    is enabled   addr at ffff8000101486cc, hardtype=0 installed=0
[0]kdb> g
/ # echo h > /proc/sysrq-trigger
Entering kdb (current=0xffff0000fa852bc0, pid 268) on processor 0 due to
Breakpoint @ 0xffff8000101486cc
[0]kdb> g
Entering kdb (current=0xffff0000fa852bc0, pid 268) on processor 0 due to
Breakpoint @ 0xffff8000101486cc
[0]kdb> ss
Entering kdb (current=0xffff0000fa852bc0, pid 268) on processor 0 due to
SS trap @ 0xffff800010082ab8
[0]kdb>

Fixes: 44679a4f142b ("arm64: KGDB: Add step debugging support")
Signed-off-by: Wei Li
Tested-by: Douglas Anderson
Reviewed-by: Douglas Anderson
Link: https://lore.kernel.org/r/20200509214159.19680-2-liwei391@huawei.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/kgdb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 43119922341f..1a157ca33262 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 	if (!kgdb_single_step)
 		return DBG_HOOK_ERROR;
 
-	kgdb_handle_exception(1, SIGTRAP, 0, regs);
+	kgdb_handle_exception(0, SIGTRAP, 0, regs);
 	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
From 581fce373581772470af8fb4fe13505dc66281e6 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose
Date: Tue, 7 Jul 2020 15:31:52 +0100
Subject: [PATCH 11/13] arm64: Documentation: Fix broken table in generated
 HTML

cpu-feature-registers.rst is missing a blank line before a couple of
tables listing the visible fields, causing broken tables in the HTML
documentation generated by "make htmldocs". Fix this by adding the
missing blank line.

Reported-by: Peter Maydell
Signed-off-by: Suzuki K Poulose
Cc: Mark Rutland
Cc: Will Deacon
Cc: Catalin Marinas
Link: https://lore.kernel.org/r/20200707143152.154541-1-suzuki.poulose@arm.com
Signed-off-by: Will Deacon
---
 Documentation/arm64/cpu-feature-registers.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
index 314fa5bc2655..f28853f80089 100644
--- a/Documentation/arm64/cpu-feature-registers.rst
+++ b/Documentation/arm64/cpu-feature-registers.rst
@@ -171,6 +171,7 @@ infrastructure:
 
      3) ID_AA64PFR1_EL1 - Processor Feature Register 1
 
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
@@ -181,6 +182,7 @@ infrastructure:
 
      4) MIDR_EL1 - Main ID Register
 
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+

From 09c717c92b52df54918e12cbfe6a4658233fda69 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 8 Jul 2020 22:13:40 -0700
Subject: [PATCH 12/13] arm64: Add missing sentinel to erratum_1463225

When the erratum_1463225 array was introduced, the terminating sentinel
was missing, causing a KASAN global-out-of-bounds error in
is_affected_midr_range_list() on arm64.

Fixes: a9e821b89daa ("arm64: Add KRYO4XX gold CPU cores to erratum list 1463225 and 1418040")
Signed-off-by: Florian Fainelli
Reviewed-by: Sai Prakash Ranjan
Link: https://lore.kernel.org/linux-arm-kernel/CA+G9fYs3EavpU89-rTQfqQ9GgxAMgMAk7jiiVrfP0yxj5s+Q6g@mail.gmail.com/
Link: https://lore.kernel.org/r/20200709051345.14544-1-f.fainelli@gmail.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpu_errata.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 8e302dc093d0..79728bfb5351 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -782,6 +782,7 @@ static const struct midr_range erratum_1463225[] = {
 	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
 	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
 	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+	{},
 };
 #endif
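The KASAN splat arises in the walker that consumes these tables: it
keeps reading entries until it finds one whose model field is zero,
which is exactly what the empty initialiser provides.
is_midr_in_range_list() in arch/arm64/include/asm/cputype.h is,
approximately:

/* The {} sentinel has model == 0, terminating the walk. */
static inline bool
is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(midr, ranges++))
			return true;

	return false;
}

Without the sentinel, the loop reads past the end of the array into
whatever global data follows it.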
From 5679b28142193a62f6af93249c0477be9f0c669b Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 9 Jul 2020 15:59:53 +0300
Subject: [PATCH 13/13] arm64/alternatives: don't patch up internal branches

Commit f7b93d42945c ("arm64/alternatives: use subsections for
replacement sequences") moved the alternatives replacement sequences
into subsections, in order to keep them as close as possible to the
code that they replace.

Unfortunately, this broke the logic in branch_insn_requires_update(),
which assumed that any branch into kernel executable code was a branch
that required updating, which is no longer the case now that the code
sequences that are patched in are in the same section as the patch site
itself.

So the only way to discriminate between branches that require updating
and ones that don't is to check whether the branch targets the
replacement sequence itself; the call to kernel_text_address() can be
dropped entirely.

Fixes: f7b93d42945c ("arm64/alternatives: use subsections for replacement sequences")
Reported-by: Alexandru Elisei
Signed-off-by: Ard Biesheuvel
Tested-by: Alexandru Elisei
Link: https://lore.kernel.org/r/20200709125953.30918-1-ardb@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/alternative.c | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index d1757ef1b1e7..73039949b5ce 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
  */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-	unsigned long replptr;
-
-	if (kernel_text_address(pc))
-		return true;
-
-	replptr = (unsigned long)ALT_REPL_PTR(alt);
-	if (pc >= replptr && pc <= (replptr + alt->alt_len))
-		return false;
-
-	/*
-	 * Branching into *another* alternate sequence is doomed, and
-	 * we're not even trying to fix it up.
-	 */
-	BUG();
+	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }
 
 #define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
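The caller's side shows why the simplified predicate suffices. The
patching loop copies each replacement instruction to the patch site and
only rewrites branch offsets for branches that leave the replacement
sequence; condensed from get_alt_insn() in the same file (paraphrased,
with the non-branch cases elided):

static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr,
			__le32 *altinsnptr)
{
	u32 insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target = (unsigned long)altinsnptr + offset;

		/*
		 * Branches within the replacement sequence are already
		 * correct once the sequence is copied verbatim; only
		 * branches that leave the sequence need their offset
		 * recomputed relative to the patch site (insnptr).
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	}

	return insn;
}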