From 1b57747e978f920fb2affd1952ed913276019115 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 01:10:54 -0700 Subject: [PATCH 01/38] riscv: Enable cbo.zero only when all harts support Zicboz Currently, we enable cbo.zero for usermode on each hart that supports the Zicboz extension. This means that the [ms]envcfg CSR value may differ between harts. Other features, such as pointer masking and CFI, require setting [ms]envcfg bits on a per-thread basis. The combination of these two adds quite some complexity and overhead to context switching, as we would need to maintain two separate masks for the per-hart and per-thread bits. Andrew Jones, who originally added Zicboz support, writes[1][2]: I've approached Zicboz the same way I would approach all extensions, which is to be per-hart. I'm not currently aware of a platform that is / will be composed of harts where some have Zicboz and others don't, but there's nothing stopping a platform like that from being built. So, how about we add code that confirms Zicboz is on all harts. If any hart does not have it, then we complain loudly and disable it on all the other harts. If it was just a hardware description bug, then it'll get fixed. If there's actually a platform which doesn't have Zicboz on all harts, then, when the issue is reported, we can decide to not support it, support it with defconfig, or support it under a Kconfig guard which must be enabled by the user. Let's follow his suggested solution and require the extension to be available on all harts, so the envcfg CSR value does not need to change when a thread migrates between harts. Since we are doing this for all extensions with fields in envcfg, the CSR itself only needs to be saved/ restored when it is present on all harts. This should not be a regression as no known hardware has asymmetric Zicboz support, but if anyone reports seeing the warning, we will re-evaluate our solution. 
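For illustration only (not part of this patch), a minimal userspace sketch of how a program would decide whether cbo.zero is usable. It relies on the existing hwprobe uapi, so RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICBOZ and RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE come from the current headers rather than from this change; querying across all online CPUs only reports the extension when every hart has it, which matches the all-harts enablement policy described above.

  /*
   * Illustrative sketch, assuming kernel headers that provide
   * <asm/hwprobe.h> and __NR_riscv_hwprobe.
   */
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>
  #include <asm/unistd.h>

  int main(void)
  {
          struct riscv_hwprobe pairs[2] = {
                  { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
                  { .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE },
          };

          /* cpusetsize == 0 with cpus == NULL selects all online CPUs. */
          if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
                  return 1;

          if (pairs[0].value & RISCV_HWPROBE_EXT_ZICBOZ)
                  printf("cbo.zero usable, block size %llu bytes\n",
                         (unsigned long long)pairs[1].value);
          else
                  printf("cbo.zero not usable on this system\n");

          return 0;
  }
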
Link: https://lore.kernel.org/linux-riscv/20240322-168f191eeb8479b2ea169a5e@orel/ [1] Link: https://lore.kernel.org/linux-riscv/20240323-28943722feb57a41fb0ff488@orel/ [2] Reviewed-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-2-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpufeature.c | 7 ++++++- arch/riscv/kernel/suspend.c | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c..e560a253e99b 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -28,6 +28,8 @@ #define NUM_ALPHA_EXTS ('z' - 'a' + 1) +static bool any_cpu_has_zicboz; + unsigned long elf_hwcap __read_mostly; /* Host ISA bitmap */ @@ -98,6 +100,7 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); return -EINVAL; } + any_cpu_has_zicboz = true; return 0; } @@ -919,8 +922,10 @@ unsigned long riscv_get_elf_hwcap(void) void riscv_user_isa_enable(void) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) csr_set(CSR_ENVCFG, ENVCFG_CBZE); + else if (any_cpu_has_zicboz) + pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index c8cec0cc5833..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -14,7 +14,7 @@ void suspend_save_csrs(struct suspend_context *context) { - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) context->envcfg = csr_read(CSR_ENVCFG); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); @@ -37,7 +37,7 @@ void suspend_save_csrs(struct suspend_context *context) void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, 0); - if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) csr_write(CSR_ENVCFG, context->envcfg); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); From 5fc7355f01376e69964bb21b685025b042c37acc Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 01:10:55 -0700 Subject: [PATCH 02/38] riscv: Add support for per-thread envcfg CSR values Some bits in the [ms]envcfg CSR, such as the CFI state and pointer masking mode, need to be controlled on a per-thread basis. Support this by keeping a copy of the CSR value in struct thread_struct and writing it during context switches. It is safe to discard the old CSR value during the context switch because the CSR is modified only by software, so the CSR will remain in sync with the copy in thread_struct. Use ALTERNATIVE directly instead of riscv_has_extension_unlikely() to minimize branchiness in the context switching code. Since thread_struct is copied during fork(), setting the value for the init task sets the default value for all other threads. 
Reviewed-by: Andrew Jones Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-3-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/processor.h | 1 + arch/riscv/include/asm/switch_to.h | 8 ++++++++ arch/riscv/kernel/cpufeature.c | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index efa1b3519b23..c1a492508835 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -102,6 +102,7 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; + unsigned long envcfg; u32 riscv_v_flags; u32 vstate_ctrl; struct __riscv_v_ext_state vstate; diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 7594df37cc9f..9685cd85e57c 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -70,6 +70,13 @@ static __always_inline bool has_fpu(void) { return false; } #define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void __switch_to_envcfg(struct task_struct *next) +{ + asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", + 0, RISCV_ISA_EXT_XLINUXENVCFG, 1) + :: "r" (next->thread.envcfg) : "memory"); +} + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); @@ -103,6 +110,7 @@ do { \ __switch_to_vector(__prev, __next); \ if (switch_to_should_flush_icache(__next)) \ local_flush_icache_all(); \ + __switch_to_envcfg(__next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index e560a253e99b..27bafc5dd62d 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -923,7 +923,7 @@ unsigned long riscv_get_elf_hwcap(void) void riscv_user_isa_enable(void) { if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) - csr_set(CSR_ENVCFG, ENVCFG_CBZE); + current->thread.envcfg |= ENVCFG_CBZE; else if (any_cpu_has_zicboz) pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); } From 368546ebe7e74cb6e18f17768533ab7077392a8c Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 14 Aug 2024 01:10:56 -0700 Subject: [PATCH 03/38] riscv: Call riscv_user_isa_enable() only on the boot hart Now that the [ms]envcfg CSR value is maintained per thread, not per hart, riscv_user_isa_enable() only needs to be called once during boot, to set the value for the init task. This also allows it to be marked as __init. Reviewed-by: Andrew Jones Reviewed-by: Conor Dooley Reviewed-by: Deepak Gupta Signed-off-by: Samuel Holland Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240814081126.956287-4-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 2 +- arch/riscv/kernel/cpufeature.c | 4 ++-- arch/riscv/kernel/smpboot.c | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..ce9a995730c1 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -31,7 +31,7 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); /* Per-cpu ISA extensions. 
*/ extern struct riscv_isainfo hart_isa[NR_CPUS]; -void riscv_user_isa_enable(void); +void __init riscv_user_isa_enable(void); #define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ .name = #_name, \ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 27bafc5dd62d..b3a057c36996 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -920,12 +920,12 @@ unsigned long riscv_get_elf_hwcap(void) return hwcap; } -void riscv_user_isa_enable(void) +void __init riscv_user_isa_enable(void) { if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) current->thread.envcfg |= ENVCFG_CBZE; else if (any_cpu_has_zicboz) - pr_warn_once("Zicboz disabled as it is unavailable on some harts\n"); + pr_warn("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 0f8f1c95ac38..e36d20205bd7 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -233,8 +233,6 @@ asmlinkage __visible void smp_callin(void) numa_add_cpu(curr_cpuid); set_cpu_online(curr_cpuid, true); - riscv_user_isa_enable(); - /* * Remote cache and TLB flushes are ignored while the CPU is offline, * so flush them both right now just in case. From 5fb0ecf73e7ad59e7b3e4cd47a3d1a67eaa200af Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Mon, 14 Oct 2024 13:53:15 -0700 Subject: [PATCH 04/38] riscv: defconfig: enable gpio support for TH1520 Enable gpio-dwapb driver which is used by TH1520-based boards like the BeagleV Ahead and the Sipeed LicheePi 4A. Signed-off-by: Drew Fustini Link: https://lore.kernel.org/r/20241014205315.1349391-1-drew@pdp7.com Signed-off-by: Palmer Dabbelt --- arch/riscv/configs/defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 2341393cfac1..cfc887a7243d 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -167,6 +167,7 @@ CONFIG_PINCTRL_SOPHGO_CV1800B=y CONFIG_PINCTRL_SOPHGO_CV1812H=y CONFIG_PINCTRL_SOPHGO_SG2000=y CONFIG_PINCTRL_SOPHGO_SG2002=y +CONFIG_GPIO_DWAPB=y CONFIG_GPIO_SIFIVE=y CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_SENSORS_SFCTEMP=m From f8a23e3b79d6c622e1b329706cbd802bc88a058f Mon Sep 17 00:00:00 2001 From: Nick Hu Date: Wed, 14 Aug 2024 13:44:33 +0800 Subject: [PATCH 05/38] cpuidle: riscv-sbi: Move sbi_cpuidle_init to arch_initcall Move the sbi_cpuidle_init to the arch_initcall to prevent the consumer devices from being deferred. 
Signed-off-by: Nick Hu Link: https://lore.kernel.org/lkml/CAKddAkAOUJSnM=Px-YO=U6pis_7mODHZbmYqcgEzXikriqYvXQ@mail.gmail.com/ Suggested-by: Anup Patel Reviewed-by: Anup Patel Link: https://lore.kernel.org/r/20240814054434.3563453-2-nick.hu@sifive.com Signed-off-by: Palmer Dabbelt --- drivers/cpuidle/cpuidle-riscv-sbi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index a6e123dfe394..98e7751dbfe8 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -592,4 +592,4 @@ static int __init sbi_cpuidle_init(void) return 0; } -device_initcall(sbi_cpuidle_init); +arch_initcall(sbi_cpuidle_init); From 27b4d6aa29abe6c4f0b7695bc831fcc8d32267d9 Mon Sep 17 00:00:00 2001 From: Nick Hu Date: Wed, 14 Aug 2024 13:44:34 +0800 Subject: [PATCH 06/38] cpuidle: riscv-sbi: Add cpuidle_disabled() check The consumer devices inside the cpu/cluster power domain may register the genpd notifier where their power domains point to the pd nodes under '/cpus/power-domains'. If cpuidle.off==1, the genpd notifier will fail because sbi_cpuidle_pd_allow_domain_state is not set. We also need sbi_cpuidle_cpuhp_up/down to invoke the callbacks. Therefore, add a cpuidle_disabled() check before cpuidle_register() to address the issue. Signed-off-by: Nick Hu Reviewed-by: Anup Patel Link: https://lore.kernel.org/r/20240814054434.3563453-3-nick.hu@sifive.com Signed-off-by: Palmer Dabbelt --- drivers/cpuidle/cpuidle-riscv-sbi.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index 98e7751dbfe8..3c8a509288f3 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -25,6 +25,7 @@ #include #include +#include "cpuidle.h" #include "dt_idle_states.h" #include "dt_idle_genpd.h" @@ -336,6 +337,9 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu) return ret; } + if (cpuidle_disabled()) + return 0; + ret = cpuidle_register(drv, NULL); if (ret) goto deinit; @@ -548,7 +552,10 @@ static int sbi_cpuidle_probe(struct platform_device *pdev) /* Setup CPU hotplut notifiers */ sbi_idle_init_cpuhp(); - pr_info("idle driver registered for all CPUs\n"); + if (cpuidle_disabled()) + pr_info("cpuidle is disabled\n"); + else + pr_info("idle driver registered for all CPUs\n"); return 0; From 8d20a739f17a2de9e269db72330f5655d6545dd4 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:18 -0700 Subject: [PATCH 07/38] RISC-V: Check scalar unaligned access on all CPUs Originally, the check_unaligned_access_emulated_all_cpus function only checked the boot hart. This fixes the function to check all harts.
Fixes: 71c54b3d169d ("riscv: report misaligned accesses emulation to hwprobe") Signed-off-by: Jesse Taube Reviewed-by: Charlie Jenkins Reviewed-by: Evan Green Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-1-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 2 ++ arch/riscv/kernel/traps_misaligned.c | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..dfa5cdddd367 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -60,6 +61,7 @@ void riscv_user_isa_enable(void); #if defined(CONFIG_RISCV_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); +void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); DECLARE_PER_CPU(long, misaligned_access_speed); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d4fd8af7aaf5..d076dde5ad20 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -526,11 +526,11 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } -static bool check_unaligned_access_emulated(int cpu) +void check_unaligned_access_emulated(struct work_struct *work __always_unused) { + int cpu = smp_processor_id(); long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); unsigned long tmp_var, tmp_val; - bool misaligned_emu_detected; *mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; @@ -538,19 +538,16 @@ static bool check_unaligned_access_emulated(int cpu) " "REG_L" %[tmp], 1(%[ptr])\n" : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory"); - misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED); /* * If unaligned_ctl is already set, this means that we detected that all * CPUS uses emulated misaligned access at boot time. If that changed * when hotplugging the new cpu, this is something we don't handle. */ - if (unlikely(unaligned_ctl && !misaligned_emu_detected)) { + if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) { pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n"); while (true) cpu_relax(); } - - return misaligned_emu_detected; } bool check_unaligned_access_emulated_all_cpus(void) @@ -562,8 +559,11 @@ bool check_unaligned_access_emulated_all_cpus(void) * accesses emulated since tasks requesting such control can run on any * CPU. */ + schedule_on_each_cpu(check_unaligned_access_emulated); + for_each_online_cpu(cpu) - if (!check_unaligned_access_emulated(cpu)) + if (per_cpu(misaligned_access_speed, cpu) + != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED) return false; unaligned_ctl = true; From 9c528b5f7927b857b40f3c46afbc869827af3c94 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:19 -0700 Subject: [PATCH 08/38] RISC-V: Scalar unaligned access emulated on hotplug CPUs The check_unaligned_access_emulated() function should have been called during CPU hotplug to ensure that if all CPUs had emulated unaligned accesses, the new CPU also does. This patch adds the call to check_unaligned_access_emulated() in the hotplug path. 
Fixes: 55e0bf49a0d0 ("RISC-V: Probe misaligned access speed in parallel") Signed-off-by: Jesse Taube Reviewed-by: Evan Green Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-2-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/unaligned_access_speed.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 160628a2116d..f3508cc54f91 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -191,6 +191,7 @@ static int riscv_online_cpu(unsigned int cpu) if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) goto exit; + check_unaligned_access_emulated(NULL); buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { pr_warn("Allocation failure, not measuring misaligned performance\n"); From c05a62c92516d7679c819f8a5177cf84c8668954 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:20 -0700 Subject: [PATCH 09/38] RISC-V: Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED to allow for the addition of RISCV_VECTOR_MISALIGNED in a later patch. Signed-off-by: Jesse Taube Reviewed-by: Conor Dooley Reviewed-by: Charlie Jenkins Reviewed-by: Evan Green Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-3-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 6 +++--- arch/riscv/include/asm/cpufeature.h | 2 +- arch/riscv/include/asm/entry-common.h | 2 +- arch/riscv/kernel/Makefile | 4 ++-- arch/riscv/kernel/fpu.S | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 22dc5ea4196c..fab7c6bc1729 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -784,7 +784,7 @@ config THREAD_SIZE_ORDER Specify the Pages of thread stack size (from 4KB to 64KB), which also affects irq stack size, which is equal to thread stack size. -config RISCV_MISALIGNED +config RISCV_SCALAR_MISALIGNED bool select SYSCTL_ARCH_UNALIGN_ALLOW help @@ -801,7 +801,7 @@ choice config RISCV_PROBE_UNALIGNED_ACCESS bool "Probe for hardware unaligned access support" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help During boot, the kernel will run a series of tests to determine the speed of unaligned accesses. 
This probing will dynamically determine @@ -812,7 +812,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS config RISCV_EMULATED_UNALIGNED_ACCESS bool "Emulate unaligned access where system support is missing" - select RISCV_MISALIGNED + select RISCV_SCALAR_MISALIGNED help If unaligned memory accesses trap into the kernel as they are not supported by the system, the kernel will emulate the unaligned diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index dfa5cdddd367..ccc6cf141c20 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -59,7 +59,7 @@ void riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) -#if defined(CONFIG_RISCV_MISALIGNED) +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 2293e535f865..0a4e3544c877 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -25,7 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); -#ifdef CONFIG_RISCV_MISALIGNED +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); #else diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7f88cc4931f5..45624c5ea86c 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -68,8 +68,8 @@ obj-y += probes/ obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o -obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += traps_misaligned.o +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += unaligned_access_speed.o obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index 327cf527dd7e..f74f6b60e347 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -170,7 +170,7 @@ SYM_FUNC_END(__fstate_restore) __access_func(f31) -#ifdef CONFIG_RISCV_MISALIGNED +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED /* * Disable compressed instructions set to keep a constant offset between FP @@ -224,4 +224,4 @@ SYM_FUNC_START(get_f64_reg) fp_access_epilogue SYM_FUNC_END(get_f64_reg) -#endif /* CONFIG_RISCV_MISALIGNED */ +#endif /* CONFIG_RISCV_SCALAR_MISALIGNED */ From d1703dc7bc8ec7adb91f5ceaf1556ff1ed212858 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:21 -0700 Subject: [PATCH 10/38] RISC-V: Detect unaligned vector accesses supported Run an unaligned vector access to test if the system supports vector unaligned access. Add the result to a new key in hwprobe. This is useful for usermode to know if vector misaligned accesses are supported and if they are faster or slower than equivalent byte accesses. 
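As a rough sketch of the usermode side (illustrative only, not code from this patch), the new key can be queried the same way as the existing scalar key, for example to choose between a misalignment-tolerant vector copy and a byte-aligned fallback:

  /*
   * Illustrative sketch, assuming uapi headers that contain this
   * patch's RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF definitions.
   */
  #include <stdbool.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <asm/hwprobe.h>
  #include <asm/unistd.h>

  static bool vector_misaligned_is_fast(void)
  {
          struct riscv_hwprobe pair = {
                  .key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
          };

          /* cpusetsize == 0 with cpus == NULL selects all online CPUs. */
          if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
                  return false;

          /*
           * UNSUPPORTED means misaligned vector accesses trap; UNKNOWN
           * and SLOW both suggest keeping aligned/byte-wise copies.
           */
          return pair.value == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
  }

A kernel that predates the key reports it as unrecognized with a value of 0, so a helper like the one above degrades to the conservative choice.
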
Signed-off-by: Jesse Taube Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-4-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 36 ++++++ arch/riscv/include/asm/cpufeature.h | 8 +- arch/riscv/include/asm/entry-common.h | 11 -- arch/riscv/include/asm/hwprobe.h | 2 +- arch/riscv/include/asm/vector.h | 2 + arch/riscv/include/uapi/asm/hwprobe.h | 5 + arch/riscv/kernel/Makefile | 4 +- arch/riscv/kernel/sys_hwprobe.c | 35 ++++++ arch/riscv/kernel/traps_misaligned.c | 125 ++++++++++++++++++++- arch/riscv/kernel/unaligned_access_speed.c | 22 ++-- arch/riscv/kernel/vector.c | 2 +- 11 files changed, 222 insertions(+), 30 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index fab7c6bc1729..05f698a88977 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -784,12 +784,26 @@ config THREAD_SIZE_ORDER Specify the Pages of thread stack size (from 4KB to 64KB), which also affects irq stack size, which is equal to thread stack size. +config RISCV_MISALIGNED + bool + help + Embed support for detecting and emulating misaligned + scalar or vector loads and stores. + config RISCV_SCALAR_MISALIGNED bool + select RISCV_MISALIGNED select SYSCTL_ARCH_UNALIGN_ALLOW help Embed support for emulating misaligned loads and stores. +config RISCV_VECTOR_MISALIGNED + bool + select RISCV_MISALIGNED + depends on RISCV_ISA_V + help + Enable detecting support for vector misaligned loads and stores. + choice prompt "Unaligned Accesses Support" default RISCV_PROBE_UNALIGNED_ACCESS @@ -841,6 +855,28 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS endchoice +choice + prompt "Vector unaligned Accesses Support" + depends on RISCV_ISA_V + default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + help + This determines the level of support for vector unaligned accesses. This + information is used by the kernel to perform optimizations. It is also + exposed to user space via the hwprobe syscall. The hardware will be + probed at boot by default. + +config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS + bool "Probe speed of vector unaligned accesses" + select RISCV_VECTOR_MISALIGNED + depends on RISCV_ISA_V + help + During boot, the kernel will run a series of tests to determine the + speed of vector unaligned accesses if they are supported. This probing + will dynamically determine the speed of vector unaligned accesses on + the underlying system if they are supported. 
+ +endchoice + source "arch/riscv/Kconfig.vendor" endmenu # "Platform type" diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index ccc6cf141c20..85bf1bce51e6 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -59,8 +59,8 @@ void riscv_user_isa_enable(void); #define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) -#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) bool check_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED) void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); @@ -72,6 +72,12 @@ static inline bool unaligned_ctl_available(void) } #endif +bool check_vector_unaligned_access_emulated_all_cpus(void); +#if defined(CONFIG_RISCV_VECTOR_MISALIGNED) +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused); +DECLARE_PER_CPU(long, vector_misaligned_access); +#endif + #if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key); diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 0a4e3544c877..7b32d2b08bb6 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -25,18 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); -#ifdef CONFIG_RISCV_SCALAR_MISALIGNED int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); -#else -static inline int handle_misaligned_load(struct pt_regs *regs) -{ - return -1; -} -static inline int handle_misaligned_store(struct pt_regs *regs) -{ - return -1; -} -#endif #endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index ffb9484531af..1ce1df6d0ff3 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,7 +8,7 @@ #include -#define RISCV_HWPROBE_MAX_KEY 9 +#define RISCV_HWPROBE_MAX_KEY 10 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index be7d309cca8a..c7c023afbacd 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -21,6 +21,7 @@ extern unsigned long riscv_v_vsize; int riscv_v_setup_vsize(void); +bool insn_is_vector(u32 insn_buf); bool riscv_v_first_use_handler(struct pt_regs *regs); void kernel_vector_begin(void); void kernel_vector_end(void); @@ -268,6 +269,7 @@ struct pt_regs; static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; } static __always_inline bool has_vector(void) { return false; } +static __always_inline bool insn_is_vector(u32 insn_buf) { return false; } static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; } diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 1e153cda57db..34c88c15322c 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -88,6 +88,11 @@ struct riscv_hwprobe { #define 
RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 #define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 #define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ /* Flags */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 45624c5ea86c..7f88cc4931f5 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -68,8 +68,8 @@ obj-y += probes/ obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += traps_misaligned.o -obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += unaligned_access_speed.o +obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o +obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index cea0ca2bf2a2..6441baada36b 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -201,6 +201,37 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus) } #endif +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + int cpu; + u64 perf = -1ULL; + + /* Return if supported or not even if speed wasn't probed */ + for_each_cpu(cpu, cpus) { + int this_perf = per_cpu(vector_misaligned_access, cpu); + + if (perf == -1ULL) + perf = this_perf; + + if (perf != this_perf) { + perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + break; + } + } + + if (perf == -1ULL) + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + return perf; +} +#else +static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) +{ + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; +} +#endif + static void hwprobe_one_pair(struct riscv_hwprobe *pair, const struct cpumask *cpus) { @@ -229,6 +260,10 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair, pair->value = hwprobe_misaligned(cpus); break; + case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF: + pair->value = hwprobe_vec_misaligned(cpus); + break; + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: pair->value = 0; if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d076dde5ad20..ef59ecfc64cb 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -16,6 +16,7 @@ #include #include #include +#include #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -322,12 +323,37 @@ union reg_data { u64 data_u64; }; -static bool unaligned_ctl __read_mostly; - /* sysctl hooks */ int unaligned_enabled __read_mostly = 1; /* Enabled by default */ -int handle_misaligned_load(struct pt_regs *regs) +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (get_insn(regs, epc, &insn)) + return -1; + + /* Only return 0 when in check_vector_unaligned_access_emulated */ + if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) { + *this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + regs->epc = epc + INSN_LEN(insn); + return 0; + } + + /* If vector instruction we don't emulate it yet */ 
+ regs->epc = epc; + return -1; +} +#else +static int handle_vector_misaligned_load(struct pt_regs *regs) +{ + return -1; +} +#endif + +static int handle_scalar_misaligned_load(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -435,7 +461,7 @@ int handle_misaligned_load(struct pt_regs *regs) return 0; } -int handle_misaligned_store(struct pt_regs *regs) +static int handle_scalar_misaligned_store(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; @@ -526,6 +552,91 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } +int handle_misaligned_load(struct pt_regs *regs) +{ + unsigned long epc = regs->epc; + unsigned long insn; + + if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) { + if (get_insn(regs, epc, &insn)) + return -1; + + if (insn_is_vector(insn)) + return handle_vector_misaligned_load(regs); + } + + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_load(regs); + + return -1; +} + +int handle_misaligned_store(struct pt_regs *regs) +{ + if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED)) + return handle_scalar_misaligned_store(regs); + + return -1; +} + +#ifdef CONFIG_RISCV_VECTOR_MISALIGNED +void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused) +{ + long *mas_ptr = this_cpu_ptr(&vector_misaligned_access); + unsigned long tmp_var; + + *mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; + + kernel_vector_begin(); + /* + * In pre-13.0.0 versions of GCC, vector registers cannot appear in + * the clobber list. This inline asm clobbers v0, but since we do not + * currently build the kernel with V enabled, the v0 clobber arg is not + * needed (as the compiler will not emit vector code itself). If the kernel + * is changed to build with V enabled, the clobber arg will need to be + * added here. 
+ */ + __asm__ __volatile__ ( + ".balign 4\n\t" + ".option push\n\t" + ".option arch, +zve32x\n\t" + " vsetivli zero, 1, e16, m1, ta, ma\n\t" // Vectors of 16b + " vle16.v v0, (%[ptr])\n\t" // Load bytes + ".option pop\n\t" + : : [ptr] "r" ((u8 *)&tmp_var + 1)); + kernel_vector_end(); +} + +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + int cpu; + + if (!has_vector()) { + for_each_online_cpu(cpu) + per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; + return false; + } + + schedule_on_each_cpu(check_vector_unaligned_access_emulated); + + for_each_online_cpu(cpu) + if (per_cpu(vector_misaligned_access, cpu) + == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return false; + + return true; +} +#else +bool check_vector_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif + +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED + +static bool unaligned_ctl __read_mostly; + void check_unaligned_access_emulated(struct work_struct *work __always_unused) { int cpu = smp_processor_id(); @@ -574,3 +685,9 @@ bool unaligned_ctl_available(void) { return unaligned_ctl; } +#else +bool check_unaligned_access_emulated_all_cpus(void) +{ + return false; +} +#endif diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index f3508cc54f91..0b8b5e17453a 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -19,7 +19,8 @@ #define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE) #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) -DEFINE_PER_CPU(long, misaligned_access_speed); +DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN; +DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED; #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS static cpumask_t fast_misaligned_access; @@ -260,23 +261,24 @@ static int check_unaligned_access_speed_all_cpus(void) kfree(bufs); return 0; } +#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ +static int check_unaligned_access_speed_all_cpus(void) +{ + return 0; +} +#endif static int check_unaligned_access_all_cpus(void) { - bool all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + bool all_cpus_emulated; + + all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); + check_vector_unaligned_access_emulated_all_cpus(); if (!all_cpus_emulated) return check_unaligned_access_speed_all_cpus(); return 0; } -#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */ -static int check_unaligned_access_all_cpus(void) -{ - check_unaligned_access_emulated_all_cpus(); - - return 0; -} -#endif arch_initcall(check_unaligned_access_all_cpus); diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 682b3feee451..821818886fab 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -66,7 +66,7 @@ void __init riscv_v_setup_ctx_cache(void) #endif } -static bool insn_is_vector(u32 insn_buf) +bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; u32 width, csr; From e7c9d66e313bc0f7cb185c4972c3c9383a0da70f Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:22 -0700 Subject: [PATCH 11/38] RISC-V: Report vector unaligned access speed hwprobe Detect if vector misaligned accesses are faster or slower than equivalent vector byte accesses. 
This is useful for usermode to know whether vector byte accesses or vector misaligned accesses have a better bandwidth for operations like memcpy. Signed-off-by: Jesse Taube Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-5-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 18 +++ arch/riscv/kernel/Makefile | 3 +- arch/riscv/kernel/copy-unaligned.h | 5 + arch/riscv/kernel/sys_hwprobe.c | 6 + arch/riscv/kernel/unaligned_access_speed.c | 141 ++++++++++++++++++++- arch/riscv/kernel/vec-copy-unaligned.S | 58 +++++++++ 6 files changed, 228 insertions(+), 3 deletions(-) create mode 100644 arch/riscv/kernel/vec-copy-unaligned.S diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 05f698a88977..d46394873afc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -875,6 +875,24 @@ config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS will dynamically determine the speed of vector unaligned accesses on the underlying system if they are supported. +config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports slow vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports slow vector unaligned memory accesses. The + kernel and userspace programs may not be able to run at all on systems + that do not support unaligned memory accesses. + +config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS + bool "Assume the system supports fast vector unaligned memory accesses" + depends on NONPORTABLE + help + Assume that the system supports fast vector unaligned memory accesses. When + enabled, this option improves the performance of the kernel on such + systems. However, the kernel and userspace programs will run much more + slowly, or will not be able to run at all, on systems that do not + support efficient unaligned memory accesses. 
+ endchoice source "arch/riscv/Kconfig.vendor" diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 7f88cc4931f5..30db92672ada 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -70,7 +70,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o -obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o +obj-$(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS) += vec-copy-unaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_FPU) += kernel_mode_fpu.o diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h index e3d70d35b708..85d4d11450cb 100644 --- a/arch/riscv/kernel/copy-unaligned.h +++ b/arch/riscv/kernel/copy-unaligned.h @@ -10,4 +10,9 @@ void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size); void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size); +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +void __riscv_copy_vec_words_unaligned(void *dst, const void *src, size_t size); +void __riscv_copy_vec_bytes_unaligned(void *dst, const void *src, size_t size); +#endif + #endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index 6441baada36b..6673278e84d5 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -228,6 +228,12 @@ static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) #else static u64 hwprobe_vec_misaligned(const struct cpumask *cpus) { + if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS)) + return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; + return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN; } #endif diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index 0b8b5e17453a..91f189cf1611 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -6,11 +6,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include "copy-unaligned.h" @@ -268,12 +270,147 @@ static int check_unaligned_access_speed_all_cpus(void) } #endif +#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS +static void check_vector_unaligned_access(struct work_struct *work __always_unused) +{ + int cpu = smp_processor_id(); + u64 start_cycles, end_cycles; + u64 word_cycles; + u64 byte_cycles; + int ratio; + unsigned long start_jiffies, now; + struct page *page; + void *dst; + void *src; + long speed = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW; + + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) + return; + + page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); + if (!page) { + pr_warn("Allocation failure, not measuring vector misaligned performance\n"); + return; + } + + /* Make an unaligned destination buffer. */ + dst = (void *)((unsigned long)page_address(page) | 0x1); + /* Unalign src as well, but differently (off by 1 + 2 = 3). */ + src = dst + (MISALIGNED_BUFFER_SIZE / 2); + src += 2; + word_cycles = -1ULL; + + /* Do a warmup. 
*/ + kernel_vector_begin(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + /* + * For a fixed amount of time, repeatedly try the function, and take + * the best time in cycles as the measurement. + */ + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < word_cycles) + word_cycles = end_cycles - start_cycles; + } + + byte_cycles = -1ULL; + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + start_jiffies = jiffies; + while ((now = jiffies) == start_jiffies) + cpu_relax(); + + while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) { + start_cycles = get_cycles64(); + /* Ensure the CSR read can't reorder WRT to the copy. */ + mb(); + __riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE); + /* Ensure the copy ends before the end time is snapped. */ + mb(); + end_cycles = get_cycles64(); + if ((end_cycles - start_cycles) < byte_cycles) + byte_cycles = end_cycles - start_cycles; + } + + kernel_vector_end(); + + /* Don't divide by zero. */ + if (!word_cycles || !byte_cycles) { + pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n", + cpu); + + return; + } + + if (word_cycles < byte_cycles) + speed = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST; + + ratio = div_u64((byte_cycles * 100), word_cycles); + pr_info("cpu%d: Ratio of vector byte access time to vector unaligned word access is %d.%02d, unaligned accesses are %s\n", + cpu, + ratio / 100, + ratio % 100, + (speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow"); + + per_cpu(vector_misaligned_access, cpu) = speed; +} + +static int riscv_online_cpu_vec(unsigned int cpu) +{ + if (!has_vector()) + return 0; + + if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) + return 0; + + check_vector_unaligned_access_emulated(NULL); + check_vector_unaligned_access(NULL); + return 0; +} + +/* Measure unaligned access speed on all CPUs present at boot in parallel. */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +{ + schedule_on_each_cpu(check_vector_unaligned_access); + + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. 
+ */ + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu_vec, NULL); + + return 0; +} +#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */ +static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) +{ + return 0; +} +#endif + static int check_unaligned_access_all_cpus(void) { - bool all_cpus_emulated; + bool all_cpus_emulated, all_cpus_vec_unsupported; all_cpus_emulated = check_unaligned_access_emulated_all_cpus(); - check_vector_unaligned_access_emulated_all_cpus(); + all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus(); + + if (!all_cpus_vec_unsupported && + IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { + kthread_run(vec_check_unaligned_access_speed_all_cpus, + NULL, "vec_check_unaligned_access_speed_all_cpus"); + } if (!all_cpus_emulated) return check_unaligned_access_speed_all_cpus(); diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S new file mode 100644 index 000000000000..d16f19f1b3b6 --- /dev/null +++ b/arch/riscv/kernel/vec-copy-unaligned.S @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 Rivos Inc. */ + +#include +#include +#include + + .text + +#define WORD_EEW 32 + +#define WORD_SEW CONCATENATE(e, WORD_EEW) +#define VEC_L CONCATENATE(vle, WORD_EEW).v +#define VEC_S CONCATENATE(vle, WORD_EEW).v + +/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using word loads and stores. */ +/* Note: The size is truncated to a multiple of WORD_EEW */ +SYM_FUNC_START(__riscv_copy_vec_words_unaligned) + andi a4, a2, ~(WORD_EEW-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, WORD_SEW, m8, ta, ma + VEC_L v0, (a1) + VEC_S v0, (a0) + addi a0, a0, WORD_EEW + addi a1, a1, WORD_EEW + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_words_unaligned) + +/* void __riscv_copy_vec_bytes_unaligned(void *, const void *, size_t) */ +/* Performs a memcpy without aligning buffers, using only byte accesses. */ +/* Note: The size is truncated to a multiple of 8 */ +SYM_FUNC_START(__riscv_copy_vec_bytes_unaligned) + andi a4, a2, ~(8-1) + beqz a4, 2f + add a3, a1, a4 + .option push + .option arch, +zve32x +1: + vsetivli t0, 8, e8, m8, ta, ma + vle8.v v0, (a1) + vse8.v v0, (a0) + addi a0, a0, 8 + addi a1, a1, 8 + bltu a1, a3, 1b + +2: + .option pop + ret +SYM_FUNC_END(__riscv_copy_vec_bytes_unaligned) From 40e09ebd791fe6b872df49c4ae859451977e1e64 Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Thu, 17 Oct 2024 12:00:23 -0700 Subject: [PATCH 12/38] RISC-V: hwprobe: Document unaligned vector perf key Document key for reporting the speed of unaligned vector accesses. The descriptions are the same as the scalar equivalent values. Signed-off-by: Jesse Taube Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-6-5b33500160f8@rivosinc.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/hwprobe.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst index 85b709257918..ea4e0b9c73e7 100644 --- a/Documentation/arch/riscv/hwprobe.rst +++ b/Documentation/arch/riscv/hwprobe.rst @@ -274,3 +274,19 @@ The following keys are defined: represent the highest userspace virtual address usable. * :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`. 
+ +* :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF`: An enum value describing the + performance of misaligned vector accesses on the selected set of processors. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN`: The performance of misaligned + vector accesses is unknown. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW`: 32-bit misaligned accesses using vector + registers are slower than the equivalent quantity of byte accesses via vector registers. + Misaligned accesses may be supported directly in hardware, or trapped and emulated by software. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_FAST`: 32-bit misaligned accesses using vector + registers are faster than the equivalent quantity of byte accesses via vector registers. + + * :c:macro:`RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED`: Misaligned vector accesses are + not supported at all and will generate a misaligned address fault. From c6898d66fd198cd458734da40e414361f295de6a Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Wed, 16 Oct 2024 10:36:25 +0200 Subject: [PATCH 13/38] riscv: Check that vdso does not contain any dynamic relocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like other architectures, use the common cmd_vdso_check to make sure of that. Reviewed-by: Björn Töpel Tested-by: Vladimir Isaev Signed-off-by: Alexandre Ghiti Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20241016083625.136311-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/Makefile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 3f1c4b2d0b06..9a1b555e8733 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -45,7 +45,7 @@ $(obj)/vdso.o: $(obj)/vdso.so # link rule for the .so file, .lds has to be first $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) + $(call if_changed,vdsold_and_check) LDFLAGS_vdso.so.dbg = -shared -soname=linux-vdso.so.1 \ --build-id=sha1 --hash-style=both --eh-frame-hdr @@ -65,7 +65,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE # actual build commands # The DSO images are built using a special linker script # Make sure only to export the intended __vdso_xxx symbol offsets. -quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ +quiet_cmd_vdsold_and_check = VDSOLD $@ + cmd_vdsold_and_check = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ - rm $@.tmp + rm $@.tmp && \ + $(cmd_vdso_check) From 8727163a1ae304b5b4ac5952f593936ab9024d37 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:42 -0700 Subject: [PATCH 14/38] dt-bindings: riscv: Add pointer masking ISA extensions The RISC-V Pointer Masking specification defines three extensions: Smmpm, Smnpm, and Ssnpm. Document the behavior of these extensions as following the ratified version 1.0 of the specification. 
Acked-by: Conor Dooley Reviewed-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-2-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- .../devicetree/bindings/riscv/extensions.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index 2cf2026cff57..28bf1daa1d27 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -128,6 +128,18 @@ properties: changes to interrupts as frozen at commit ccbddab ("Merge pull request #42 from riscv/jhauser-2023-RC4") of riscv-aia. + - const: smmpm + description: | + The standard Smmpm extension for M-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + + - const: smnpm + description: | + The standard Smnpm extension for next-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + - const: smstateen description: | The standard Smstateen extension for controlling access to CSRs @@ -147,6 +159,12 @@ properties: and mode-based filtering as ratified at commit 01d1df0 ("Add ability to manually trigger workflow. (#2)") of riscv-count-overflow. + - const: ssnpm + description: | + The standard Ssnpm extension for next-mode pointer masking as + ratified at commit d70011dde6c2 ("Update to ratified state") + of riscv-j-extension. + - const: sstc description: | The standard Sstc supervisor-level extension for time compare as From 2e6f6ea452aa9fa9f150520fdecf6bda31954db4 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:43 -0700 Subject: [PATCH 15/38] riscv: Add ISA extension parsing for pointer masking The RISC-V Pointer Masking specification defines three extensions: Smmpm, Smnpm, and Ssnpm. Add support for parsing each of them. The specific extension which provides pointer masking support to userspace (Supm) depends on the kernel's privilege mode, so provide a macro to abstract this selection. Smmpm implies the existence of the mseccfg CSR. As it is the only user of this CSR so far, there is no need for an Xlinuxmseccfg extension. 
Reviewed-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-3-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hwcap.h | 5 +++++ arch/riscv/kernel/cpufeature.c | 3 +++ 2 files changed, 8 insertions(+) diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 46d9de54179e..8608883da453 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -93,6 +93,9 @@ #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_SMMPM 87 +#define RISCV_ISA_EXT_SMNPM 88 +#define RISCV_ISA_EXT_SSNPM 89 #define RISCV_ISA_EXT_XLINUXENVCFG 127 @@ -101,8 +104,10 @@ #ifdef CONFIG_RISCV_M_MODE #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM #else #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM #endif #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index b3a057c36996..94596bca464e 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -377,9 +377,12 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), + __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), + __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), + __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), From 29eedc7d1587f42f33ae209be45c89c424ee9c00 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:44 -0700 Subject: [PATCH 16/38] riscv: Add CSR definitions for pointer masking Pointer masking is controlled via a two-bit PMM field, which appears in various CSRs depending on which extensions are implemented. Smmpm adds the field to mseccfg; Smnpm adds the field to menvcfg; Ssnpm adds the field to senvcfg. If the H extension is implemented, Ssnpm also defines henvcfg.PMM and hstatus.HUPMM. 
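To make the encodings concrete, here is a hedged sketch (not code from this series) of mapping a requested minimum number of tag bits onto the three encodable PMLEN values using the constants added by this patch. A real implementation, such as the userspace pointer masking support later in this series, must additionally check which PMLEN values the hart actually accepts.

  /*
   * Hedged sketch only: round a requested minimum tag width up to one
   * of the encodable PMLEN values (0, 7 or 16). Hardware support for
   * the chosen encoding still has to be verified separately.
   */
  #include <linux/errno.h>
  #include <asm/csr.h>

  static int pmlen_to_pmm(unsigned long pmlen, unsigned long *pmm)
  {
          if (pmlen == 0)
                  *pmm = ENVCFG_PMM_PMLEN_0;
          else if (pmlen <= 7)
                  *pmm = ENVCFG_PMM_PMLEN_7;
          else if (pmlen <= 16)
                  *pmm = ENVCFG_PMM_PMLEN_16;
          else
                  return -EINVAL;

          return 0;
  }
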
Reviewed-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-4-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/csr.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 25966995da04..fe5d4eb9adea 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -119,6 +119,10 @@ /* HSTATUS flags */ #ifdef CONFIG_64BIT +#define HSTATUS_HUPMM _AC(0x3000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_0 _AC(0x0000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_7 _AC(0x2000000000000, UL) +#define HSTATUS_HUPMM_PMLEN_16 _AC(0x3000000000000, UL) #define HSTATUS_VSXL _AC(0x300000000, UL) #define HSTATUS_VSXL_SHIFT 32 #endif @@ -195,6 +199,10 @@ /* xENVCFG flags */ #define ENVCFG_STCE (_AC(1, ULL) << 63) #define ENVCFG_PBMTE (_AC(1, ULL) << 62) +#define ENVCFG_PMM (_AC(0x3, ULL) << 32) +#define ENVCFG_PMM_PMLEN_0 (_AC(0x0, ULL) << 32) +#define ENVCFG_PMM_PMLEN_7 (_AC(0x2, ULL) << 32) +#define ENVCFG_PMM_PMLEN_16 (_AC(0x3, ULL) << 32) #define ENVCFG_CBZE (_AC(1, UL) << 7) #define ENVCFG_CBCFE (_AC(1, UL) << 6) #define ENVCFG_CBIE_SHIFT 4 @@ -216,6 +224,12 @@ #define SMSTATEEN0_SSTATEEN0_SHIFT 63 #define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT) +/* mseccfg bits */ +#define MSECCFG_PMM ENVCFG_PMM +#define MSECCFG_PMM_PMLEN_0 ENVCFG_PMM_PMLEN_0 +#define MSECCFG_PMM_PMLEN_7 ENVCFG_PMM_PMLEN_7 +#define MSECCFG_PMM_PMLEN_16 ENVCFG_PMM_PMLEN_16 + /* symbolic CSR names: */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 @@ -382,6 +396,8 @@ #define CSR_MIP 0x344 #define CSR_PMPCFG0 0x3a0 #define CSR_PMPADDR0 0x3b0 +#define CSR_MSECCFG 0x747 +#define CSR_MSECCFGH 0x757 #define CSR_MVENDORID 0xf11 #define CSR_MARCHID 0xf12 #define CSR_MIMPID 0xf13 From 09d6775f503b393d0457c7126aa43208e1724004 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:45 -0700 Subject: [PATCH 17/38] riscv: Add support for userspace pointer masking RISC-V supports pointer masking with a variable number of tag bits (which is called "PMLEN" in the specification) and which is configured at the next higher privilege level. Wire up the PR_SET_TAGGED_ADDR_CTRL and PR_GET_TAGGED_ADDR_CTRL prctls so userspace can request a lower bound on the number of tag bits and determine the actual number of tag bits. As with arm64's PR_TAGGED_ADDR_ENABLE, the pointer masking configuration is thread-scoped, inherited on clone() and fork() and cleared on execve(). Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-5-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/uabi.rst | 12 ++++ arch/riscv/Kconfig | 11 ++++ arch/riscv/include/asm/processor.h | 8 +++ arch/riscv/include/asm/switch_to.h | 11 ++++ arch/riscv/kernel/process.c | 91 ++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 5 +- 6 files changed, 137 insertions(+), 1 deletion(-) diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst index 2b420bab0527..ddb8359a46ed 100644 --- a/Documentation/arch/riscv/uabi.rst +++ b/Documentation/arch/riscv/uabi.rst @@ -68,3 +68,15 @@ Misaligned accesses Misaligned scalar accesses are supported in userspace, but they may perform poorly. Misaligned vector accesses are only supported if the Zicclsm extension is supported. 
+ +Pointer masking +--------------- + +Support for pointer masking in userspace (the Supm extension) is provided via +the ``PR_SET_TAGGED_ADDR_CTRL`` and ``PR_GET_TAGGED_ADDR_CTRL`` ``prctl()`` +operations. Pointer masking is disabled by default. To enable it, userspace +must call ``PR_SET_TAGGED_ADDR_CTRL`` with the ``PR_PMLEN`` field set to the +number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted +as a lower bound; if the kernel is unable to satisfy the request, the +``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits +is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation. diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 22dc5ea4196c..0ef449465378 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -531,6 +531,17 @@ config RISCV_ISA_C If you don't know what to do here, say Y. +config RISCV_ISA_SUPM + bool "Supm extension for userspace pointer masking" + depends on 64BIT + default y + help + Add support for pointer masking in userspace (Supm) when the + underlying hardware extension (Smnpm or Ssnpm) is detected at boot. + + If this option is disabled, userspace will be unable to use + the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API. + config RISCV_ISA_SVNAPOT bool "Svnapot extension support for supervisor mode NAPOT pages" depends on 64BIT && MMU diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index c1a492508835..5f56eb9d114a 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -178,6 +178,14 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); #define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2) extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread); +#ifdef CONFIG_RISCV_ISA_SUPM +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg); +long get_tagged_addr_ctrl(struct task_struct *task); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) +#endif + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 9685cd85e57c..94e33216b2d9 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -70,6 +70,17 @@ static __always_inline bool has_fpu(void) { return false; } #define __switch_to_fpu(__prev, __next) do { } while (0) #endif +static inline void envcfg_update_bits(struct task_struct *task, + unsigned long mask, unsigned long val) +{ + unsigned long envcfg; + + envcfg = (task->thread.envcfg & ~mask) | val; + task->thread.envcfg = envcfg; + if (task == current) + csr_write(CSR_ENVCFG, envcfg); +} + static inline void __switch_to_envcfg(struct task_struct *next) { asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e3142d8a6e28..200d2ed64dfe 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -7,6 +7,7 @@ * Copyright (C) 2017 SiFive */ +#include #include #include #include @@ -180,6 +181,10 @@ void flush_thread(void) memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE); #endif +#ifdef CONFIG_RISCV_ISA_SUPM + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + envcfg_update_bits(current, 
ENVCFG_PMM, ENVCFG_PMM_PMLEN_0); +#endif } void arch_release_task_struct(struct task_struct *tsk) @@ -242,3 +247,89 @@ void __init arch_task_cache_init(void) { riscv_v_setup_ctx_cache(); } + +#ifdef CONFIG_RISCV_ISA_SUPM +enum { + PMLEN_0 = 0, + PMLEN_7 = 7, + PMLEN_16 = 16, +}; + +static bool have_user_pmlen_7; +static bool have_user_pmlen_16; + +long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) +{ + unsigned long valid_mask = PR_PMLEN_MASK; + struct thread_info *ti = task_thread_info(task); + unsigned long pmm; + u8 pmlen; + + if (is_compat_thread(ti)) + return -EINVAL; + + if (arg & ~valid_mask) + return -EINVAL; + + /* + * Prefer the smallest PMLEN that satisfies the user's request, + * in case choosing a larger PMLEN has a performance impact. + */ + pmlen = FIELD_GET(PR_PMLEN_MASK, arg); + if (pmlen == PMLEN_0) + pmm = ENVCFG_PMM_PMLEN_0; + else if (pmlen <= PMLEN_7 && have_user_pmlen_7) + pmm = ENVCFG_PMM_PMLEN_7; + else if (pmlen <= PMLEN_16 && have_user_pmlen_16) + pmm = ENVCFG_PMM_PMLEN_16; + else + return -EINVAL; + + envcfg_update_bits(task, ENVCFG_PMM, pmm); + + return 0; +} + +long get_tagged_addr_ctrl(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + long ret = 0; + + if (is_compat_thread(ti)) + return -EINVAL; + + switch (task->thread.envcfg & ENVCFG_PMM) { + case ENVCFG_PMM_PMLEN_7: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7); + break; + case ENVCFG_PMM_PMLEN_16: + ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16); + break; + } + + return ret; +} + +static bool try_to_set_pmm(unsigned long value) +{ + csr_set(CSR_ENVCFG, value); + return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; +} + +static int __init tagged_addr_init(void) +{ + if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + return 0; + + /* + * envcfg.PMM is a WARL field. Detect which values are supported. + * Assume the supported PMLEN values are the same on all harts. + */ + csr_clear(CSR_ENVCFG, ENVCFG_PMM); + have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); + have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + + return 0; +} +core_initcall(tagged_addr_init); +#endif /* CONFIG_RISCV_ISA_SUPM */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 35791791a879..cefd656ebf43 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -230,7 +230,7 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) -/* Tagged user address controls for arm64 */ +/* Tagged user address controls for arm64 and RISC-V */ #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) @@ -244,6 +244,9 @@ struct prctl_mm_map { # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Unused; kept only for source compatibility */ # define PR_MTE_TCF_SHIFT 1 +/* RISC-V pointer masking tag length */ +# define PR_PMLEN_SHIFT 24 +# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 From 2e1743085887ba3f553c2bb472a75a3ff744b242 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:46 -0700 Subject: [PATCH 18/38] riscv: Add support for the tagged address ABI When pointer masking is enabled for userspace, the kernel can accept tagged pointers as arguments to some system calls. Allow this by untagging the pointers in access_ok() and the uaccess routines. 
The uaccess routines must perform untagging in software because U-mode and S-mode have entirely separate pointer masking configurations. In fact, hardware may not even implement pointer masking for S-mode. Since the number of tag bits is variable, untagged_addr_remote() needs to know what PMLEN to use for the remote mm. Therefore, the pointer masking mode must be the same for all threads sharing an mm. Enforce this with a lock flag in the mm context, as x86 does for LAM. The flag gets reset in init_new_context() during fork(), as the new mm is no longer multithreaded. Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-6-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/uabi.rst | 4 ++ arch/riscv/include/asm/mmu.h | 7 +++ arch/riscv/include/asm/mmu_context.h | 13 +++++ arch/riscv/include/asm/uaccess.h | 43 ++++++++++++++-- arch/riscv/kernel/process.c | 73 ++++++++++++++++++++++++++-- 5 files changed, 130 insertions(+), 10 deletions(-) diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst index ddb8359a46ed..243e40062e34 100644 --- a/Documentation/arch/riscv/uabi.rst +++ b/Documentation/arch/riscv/uabi.rst @@ -80,3 +80,7 @@ number of mask/tag bits needed by the application. ``PR_PMLEN`` is interpreted as a lower bound; if the kernel is unable to satisfy the request, the ``PR_SET_TAGGED_ADDR_CTRL`` operation will fail. The actual number of tag bits is returned in ``PR_PMLEN`` by the ``PR_GET_TAGGED_ADDR_CTRL`` operation. + +Additionally, when pointer masking is enabled (``PR_PMLEN`` is greater than 0), +a tagged address ABI is supported, with the same interface and behavior as +documented for AArch64 (Documentation/arch/arm64/tagged-address-abi.rst).
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index c9e03e9da3dc..1cc90465d75b 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -25,9 +25,16 @@ typedef struct { #ifdef CONFIG_BINFMT_ELF_FDPIC unsigned long exec_fdpic_loadmap; unsigned long interp_fdpic_loadmap; +#endif + unsigned long flags; +#ifdef CONFIG_RISCV_ISA_SUPM + u8 pmlen; #endif } mm_context_t; +/* Lock the pointer masking mode because this mm is multithreaded */ +#define MM_CONTEXT_LOCK_PMLEN 0 + #define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK) #define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK) diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h index 7030837adc1a..8c4bc49a3a0f 100644 --- a/arch/riscv/include/asm/mmu_context.h +++ b/arch/riscv/include/asm/mmu_context.h @@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { +#ifdef CONFIG_RISCV_ISA_SUPM + next->context.pmlen = 0; +#endif switch_mm(prev, next, NULL); } @@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_MMU atomic_long_set(&mm->context.id, 0); #endif + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags); return 0; } DECLARE_STATIC_KEY_FALSE(use_asid_allocator); +#ifdef CONFIG_RISCV_ISA_SUPM +#define mm_untag_mask mm_untag_mask +static inline unsigned long mm_untag_mask(struct mm_struct *mm) +{ + return -1UL >> mm->context.pmlen; +} +#endif + #include #endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 72ec1d9bd3f3..fee56b0c8058 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -9,8 +9,41 @@ #define _ASM_RISCV_UACCESS_H #include +#include #include /* for TASK_SIZE */ +#ifdef CONFIG_RISCV_ISA_SUPM +static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr) +{ + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) { + u8 pmlen = mm->context.pmlen; + + /* Virtual addresses are sign-extended; physical addresses are zero-extended. 
*/ + if (IS_ENABLED(CONFIG_MMU)) + return (long)(addr << pmlen) >> pmlen; + else + return (addr << pmlen) >> pmlen; + } + + return addr; +} + +#define untagged_addr(addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \ +}) + +#define untagged_addr_remote(mm, addr) ({ \ + unsigned long __addr = (__force unsigned long)(addr); \ + mmap_assert_locked(mm); \ + (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \ +}) + +#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size)) +#else +#define untagged_addr(addr) (addr) +#endif + /* * User space memory access functions */ @@ -130,7 +163,7 @@ do { \ */ #define __get_user(x, ptr) \ ({ \ - const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ long __gu_err = 0; \ \ __chk_user_ptr(__gu_ptr); \ @@ -246,7 +279,7 @@ do { \ */ #define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \ __typeof__(*__gu_ptr) __val = (x); \ long __pu_err = 0; \ \ @@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to, static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __asm_copy_from_user(to, from, n); + return __asm_copy_from_user(to, untagged_addr(from), n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __asm_copy_to_user(to, from, n); + return __asm_copy_to_user(untagged_addr(to), from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); return access_ok(to, n) ? - __clear_user(to, n) : n; + __clear_user(untagged_addr(to), n) : n; } #define __get_kernel_nofault(dst, src, type, err_label) \ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 200d2ed64dfe..58b6482c2bf6 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -213,6 +213,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) unsigned long tls = args->tls; struct pt_regs *childregs = task_pt_regs(p); + /* Ensure all threads in this mm have the same pointer masking mode. */ + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM)) + set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags); + memset(&p->thread.s, 0, sizeof(p->thread.s)); /* p->thread holds context to be restored by __switch_to() */ @@ -258,10 +262,16 @@ enum { static bool have_user_pmlen_7; static bool have_user_pmlen_16; +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. + */ +static unsigned int tagged_addr_disabled; + long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) { - unsigned long valid_mask = PR_PMLEN_MASK; + unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE; struct thread_info *ti = task_thread_info(task); + struct mm_struct *mm = task->mm; unsigned long pmm; u8 pmlen; @@ -276,16 +286,41 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) * in case choosing a larger PMLEN has a performance impact. 
*/ pmlen = FIELD_GET(PR_PMLEN_MASK, arg); - if (pmlen == PMLEN_0) + if (pmlen == PMLEN_0) { pmm = ENVCFG_PMM_PMLEN_0; - else if (pmlen <= PMLEN_7 && have_user_pmlen_7) + } else if (pmlen <= PMLEN_7 && have_user_pmlen_7) { + pmlen = PMLEN_7; pmm = ENVCFG_PMM_PMLEN_7; - else if (pmlen <= PMLEN_16 && have_user_pmlen_16) + } else if (pmlen <= PMLEN_16 && have_user_pmlen_16) { + pmlen = PMLEN_16; pmm = ENVCFG_PMM_PMLEN_16; - else + } else { + return -EINVAL; + } + + /* + * Do not allow the enabling of the tagged address ABI if globally + * disabled via sysctl abi.tagged_addr_disabled, if pointer masking + * is disabled for userspace. + */ + if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen)) return -EINVAL; + if (!(arg & PR_TAGGED_ADDR_ENABLE)) + pmlen = PMLEN_0; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) { + mmap_write_unlock(mm); + return -EBUSY; + } + envcfg_update_bits(task, ENVCFG_PMM, pmm); + mm->context.pmlen = pmlen; + + mmap_write_unlock(mm); return 0; } @@ -298,6 +333,10 @@ long get_tagged_addr_ctrl(struct task_struct *task) if (is_compat_thread(ti)) return -EINVAL; + /* + * The mm context's pmlen is set only when the tagged address ABI is + * enabled, so the effective PMLEN must be extracted from envcfg.PMM. + */ switch (task->thread.envcfg & ENVCFG_PMM) { case ENVCFG_PMM_PMLEN_7: ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7); @@ -307,6 +346,9 @@ long get_tagged_addr_ctrl(struct task_struct *task) break; } + if (task->mm->context.pmlen) + ret |= PR_TAGGED_ADDR_ENABLE; + return ret; } @@ -316,6 +358,24 @@ static bool try_to_set_pmm(unsigned long value) return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; } +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr_disabled", + .mode = 0644, + .data = &tagged_addr_disabled, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +}; + static int __init tagged_addr_init(void) { if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) @@ -329,6 +389,9 @@ static int __init tagged_addr_init(void) have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; } core_initcall(tagged_addr_init); From 78844482a1c939a972681842f8ee2a8ddb202441 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:47 -0700 Subject: [PATCH 19/38] riscv: Allow ptrace control of the tagged address ABI This allows a tracer to control the ABI of the tracee, as on arm64. 
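As a rough illustration (not part of this patch), a tracer reaches the new regset through the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests. The sketch below assumes the tracee is already attached and stopped, and defines NT_RISCV_TAGGED_ADDR_CTRL locally in case the installed UAPI headers predate this change:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_RISCV_TAGGED_ADDR_CTRL
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902
#endif

/* Read the tracee's tagged address control word (same format as the prctl). */
static long read_tagged_addr_ctrl(pid_t pid)
{
	long ctrl = 0;
	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };

	if (ptrace(PTRACE_GETREGSET, pid,
		   (void *)(unsigned long)NT_RISCV_TAGGED_ADDR_CTRL, &iov))
		perror("PTRACE_GETREGSET");

	return ctrl;
}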
Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-7-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/ptrace.c | 42 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/elf.h | 1 + 2 files changed, 43 insertions(+) diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 92731ff8c79a..ea67e9fb7a58 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -28,6 +28,9 @@ enum riscv_regset { #ifdef CONFIG_RISCV_ISA_V REGSET_V, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + REGSET_TAGGED_ADDR_CTRL, +#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -152,6 +155,35 @@ static int riscv_vr_set(struct task_struct *target, } #endif +#ifdef CONFIG_RISCV_ISA_SUPM +static int tagged_addr_ctrl_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + long ctrl = get_tagged_addr_ctrl(target); + + if (IS_ERR_VALUE(ctrl)) + return ctrl; + + return membuf_write(&to, &ctrl, sizeof(ctrl)); +} + +static int tagged_addr_ctrl_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + long ctrl; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); + if (ret) + return ret; + + return set_tagged_addr_ctrl(target, ctrl); +} +#endif + static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, @@ -182,6 +214,16 @@ static const struct user_regset riscv_user_regset[] = { .set = riscv_vr_set, }, #endif +#ifdef CONFIG_RISCV_ISA_SUPM + [REGSET_TAGGED_ADDR_CTRL] = { + .core_note_type = NT_RISCV_TAGGED_ADDR_CTRL, + .n = 1, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = tagged_addr_ctrl_get, + .set = tagged_addr_ctrl_set, + }, +#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index b9935988da5c..a920cf8934dc 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -450,6 +450,7 @@ typedef struct elf64_shdr { #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ #define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ +#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ From 7470b5afd150e683c7aef03961d0c4c6f500de3b Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:48 -0700 Subject: [PATCH 20/38] riscv: selftests: Add a pointer masking test This test covers the behavior of the PR_SET_TAGGED_ADDR_CTRL and PR_GET_TAGGED_ADDR_CTRL prctl() operations, their effects on the userspace ABI, and their effects on the system call ABI. 
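For reference (not part of the patch), the core sequence the test exercises is a pair of prctl() calls; the fallback defines mirror those in the test for header sets that predate this series:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#endif
#ifndef PR_PMLEN_SHIFT
#define PR_PMLEN_SHIFT	24
#endif
#ifndef PR_PMLEN_MASK
#define PR_PMLEN_MASK	(0x7fUL << PR_PMLEN_SHIFT)
#endif

int main(void)
{
	long ctrl;

	/* Request at least 7 tag bits; the kernel may grant more (e.g. 16). */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, 7UL << PR_PMLEN_SHIFT, 0, 0, 0))
		return 1;

	ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
	if (ctrl < 0)
		return 1;

	printf("effective PMLEN: %ld\n", (ctrl & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT);
	return 0;
}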
Reviewed-by: Charlie Jenkins Tested-by: Charlie Jenkins Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-8-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- tools/testing/selftests/riscv/Makefile | 2 +- tools/testing/selftests/riscv/abi/.gitignore | 1 + tools/testing/selftests/riscv/abi/Makefile | 10 + .../selftests/riscv/abi/pointer_masking.c | 332 ++++++++++++++++++ 4 files changed, 344 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/riscv/abi/.gitignore create mode 100644 tools/testing/selftests/riscv/abi/Makefile create mode 100644 tools/testing/selftests/riscv/abi/pointer_masking.c diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile index 7ce03d832b64..099b8c1f46f8 100644 --- a/tools/testing/selftests/riscv/Makefile +++ b/tools/testing/selftests/riscv/Makefile @@ -5,7 +5,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),riscv)) -RISCV_SUBTARGETS ?= hwprobe vector mm sigreturn +RISCV_SUBTARGETS ?= abi hwprobe mm sigreturn vector else RISCV_SUBTARGETS := endif diff --git a/tools/testing/selftests/riscv/abi/.gitignore b/tools/testing/selftests/riscv/abi/.gitignore new file mode 100644 index 000000000000..b38358f91c4d --- /dev/null +++ b/tools/testing/selftests/riscv/abi/.gitignore @@ -0,0 +1 @@ +pointer_masking diff --git a/tools/testing/selftests/riscv/abi/Makefile b/tools/testing/selftests/riscv/abi/Makefile new file mode 100644 index 000000000000..ed82ff9c664e --- /dev/null +++ b/tools/testing/selftests/riscv/abi/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +CFLAGS += -I$(top_srcdir)/tools/include + +TEST_GEN_PROGS := pointer_masking + +include ../../lib.mk + +$(OUTPUT)/pointer_masking: pointer_masking.c + $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^ diff --git a/tools/testing/selftests/riscv/abi/pointer_masking.c b/tools/testing/selftests/riscv/abi/pointer_masking.c new file mode 100644 index 000000000000..dee41b7ee3e3 --- /dev/null +++ b/tools/testing/selftests/riscv/abi/pointer_masking.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../../kselftest.h" + +#ifndef PR_PMLEN_SHIFT +#define PR_PMLEN_SHIFT 24 +#endif +#ifndef PR_PMLEN_MASK +#define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) +#endif + +static int dev_zero; + +static int pipefd[2]; + +static sigjmp_buf jmpbuf; + +static void sigsegv_handler(int sig) +{ + siglongjmp(jmpbuf, 1); +} + +static int min_pmlen; +static int max_pmlen; + +static inline bool valid_pmlen(int pmlen) +{ + return pmlen == 0 || pmlen == 7 || pmlen == 16; +} + +static void test_pmlen(void) +{ + ksft_print_msg("Testing available PMLEN values\n"); + + for (int request = 0; request <= 16; request++) { + int pmlen, ret; + + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, request << PR_PMLEN_SHIFT, 0, 0, 0); + if (ret) + goto pr_set_error; + + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + ksft_test_result(ret >= 0, "PMLEN=%d PR_GET_TAGGED_ADDR_CTRL\n", request); + if (ret < 0) + goto pr_get_error; + + pmlen = (ret & PR_PMLEN_MASK) >> PR_PMLEN_SHIFT; + ksft_test_result(pmlen >= request, "PMLEN=%d constraint\n", request); + ksft_test_result(valid_pmlen(pmlen), "PMLEN=%d validity\n", request); + + if (min_pmlen == 0) + min_pmlen = pmlen; + if (max_pmlen < pmlen) + max_pmlen = pmlen; + + continue; + +pr_set_error: + ksft_test_result_skip("PMLEN=%d PR_GET_TAGGED_ADDR_CTRL\n", request); +pr_get_error: + 
ksft_test_result_skip("PMLEN=%d constraint\n", request); + ksft_test_result_skip("PMLEN=%d validity\n", request); + } + + if (max_pmlen == 0) + ksft_exit_fail_msg("Failed to enable pointer masking\n"); +} + +static int set_tagged_addr_ctrl(int pmlen, bool tagged_addr_abi) +{ + int arg, ret; + + arg = pmlen << PR_PMLEN_SHIFT | tagged_addr_abi; + ret = prctl(PR_SET_TAGGED_ADDR_CTRL, arg, 0, 0, 0); + if (!ret) { + ret = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); + if (ret == arg) + return 0; + } + + return ret < 0 ? -errno : -ENODATA; +} + +static void test_dereference_pmlen(int pmlen) +{ + static volatile int i; + volatile int *p; + int ret; + + ret = set_tagged_addr_ctrl(pmlen, false); + if (ret) + return ksft_test_result_error("PMLEN=%d setup (%d)\n", pmlen, ret); + + i = pmlen; + + if (pmlen) { + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen)); + + /* These dereferences should succeed. */ + if (sigsetjmp(jmpbuf, 1)) + return ksft_test_result_fail("PMLEN=%d valid tag\n", pmlen); + if (*p != pmlen) + return ksft_test_result_fail("PMLEN=%d bad value\n", pmlen); + ++*p; + } + + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen - 1)); + + /* These dereferences should raise SIGSEGV. */ + if (sigsetjmp(jmpbuf, 1)) + return ksft_test_result_pass("PMLEN=%d dereference\n", pmlen); + ++*p; + ksft_test_result_fail("PMLEN=%d invalid tag\n", pmlen); +} + +static void test_dereference(void) +{ + ksft_print_msg("Testing userspace pointer dereference\n"); + + signal(SIGSEGV, sigsegv_handler); + + test_dereference_pmlen(0); + test_dereference_pmlen(min_pmlen); + test_dereference_pmlen(max_pmlen); + + signal(SIGSEGV, SIG_DFL); +} + +static void execve_child_sigsegv_handler(int sig) +{ + exit(42); +} + +static int execve_child(void) +{ + static volatile int i; + volatile int *p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - 7)); + + signal(SIGSEGV, execve_child_sigsegv_handler); + + /* This dereference should raise SIGSEGV. */ + return *p; +} + +static void test_fork_exec(void) +{ + int ret, status; + + ksft_print_msg("Testing fork/exec behavior\n"); + + ret = set_tagged_addr_ctrl(min_pmlen, false); + if (ret) + return ksft_test_result_error("setup (%d)\n", ret); + + if (fork()) { + wait(&status); + ksft_test_result(WIFEXITED(status) && WEXITSTATUS(status) == 42, + "dereference after fork\n"); + } else { + static volatile int i = 42; + volatile int *p; + + p = (volatile int *)((uintptr_t)&i | 1UL << (__riscv_xlen - min_pmlen)); + + /* This dereference should succeed. */ + exit(*p); + } + + if (fork()) { + wait(&status); + ksft_test_result(WIFEXITED(status) && WEXITSTATUS(status) == 42, + "dereference after fork+exec\n"); + } else { + /* Will call execve_child(). 
*/ + execve("/proc/self/exe", (char *const []) { "", NULL }, NULL); + } +} + +static void test_tagged_addr_abi_sysctl(void) +{ + char value; + int fd; + + ksft_print_msg("Testing tagged address ABI sysctl\n"); + + fd = open("/proc/sys/abi/tagged_addr_disabled", O_WRONLY); + if (fd < 0) { + ksft_test_result_skip("failed to open sysctl file\n"); + ksft_test_result_skip("failed to open sysctl file\n"); + return; + } + + value = '1'; + pwrite(fd, &value, 1, 0); + ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == -EINVAL, + "sysctl disabled\n"); + + value = '0'; + pwrite(fd, &value, 1, 0); + ksft_test_result(set_tagged_addr_ctrl(min_pmlen, true) == 0, + "sysctl enabled\n"); + + set_tagged_addr_ctrl(0, false); + + close(fd); +} + +static void test_tagged_addr_abi_pmlen(int pmlen) +{ + int i, *p, ret; + + i = ~pmlen; + + if (pmlen) { + p = (int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen)); + + ret = set_tagged_addr_ctrl(pmlen, false); + if (ret) + return ksft_test_result_error("PMLEN=%d ABI disabled setup (%d)\n", + pmlen, ret); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d ABI disabled write\n", pmlen); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d ABI disabled read\n", pmlen); + + if (i != ~pmlen) + return ksft_test_result_fail("PMLEN=%d ABI disabled value\n", pmlen); + + ret = set_tagged_addr_ctrl(pmlen, true); + if (ret) + return ksft_test_result_error("PMLEN=%d ABI enabled setup (%d)\n", + pmlen, ret); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret != sizeof(*p)) + return ksft_test_result_fail("PMLEN=%d ABI enabled write\n", pmlen); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret != sizeof(*p)) + return ksft_test_result_fail("PMLEN=%d ABI enabled read\n", pmlen); + + if (i) + return ksft_test_result_fail("PMLEN=%d ABI enabled value\n", pmlen); + + i = ~pmlen; + } else { + /* The tagged address ABI cannot be enabled when PMLEN == 0. */ + ret = set_tagged_addr_ctrl(pmlen, true); + if (ret != -EINVAL) + return ksft_test_result_error("PMLEN=%d ABI setup (%d)\n", + pmlen, ret); + } + + p = (int *)((uintptr_t)&i | 1UL << (__riscv_xlen - pmlen - 1)); + + ret = write(pipefd[1], p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d invalid tag write (%d)\n", pmlen, errno); + + ret = read(dev_zero, p, sizeof(*p)); + if (ret >= 0 || errno != EFAULT) + return ksft_test_result_fail("PMLEN=%d invalid tag read\n", pmlen); + + if (i != ~pmlen) + return ksft_test_result_fail("PMLEN=%d invalid tag value\n", pmlen); + + ksft_test_result_pass("PMLEN=%d tagged address ABI\n", pmlen); +} + +static void test_tagged_addr_abi(void) +{ + ksft_print_msg("Testing tagged address ABI\n"); + + test_tagged_addr_abi_pmlen(0); + test_tagged_addr_abi_pmlen(min_pmlen); + test_tagged_addr_abi_pmlen(max_pmlen); +} + +static struct test_info { + unsigned int nr_tests; + void (*test_fn)(void); +} tests[] = { + { .nr_tests = 17 * 3, test_pmlen }, + { .nr_tests = 3, test_dereference }, + { .nr_tests = 2, test_fork_exec }, + { .nr_tests = 2, test_tagged_addr_abi_sysctl }, + { .nr_tests = 3, test_tagged_addr_abi }, +}; + +int main(int argc, char **argv) +{ + unsigned int plan = 0; + int ret; + + /* Check if this is the child process after execve(). 
*/ + if (!argv[0][0]) + return execve_child(); + + dev_zero = open("/dev/zero", O_RDWR); + if (dev_zero < 0) + return 1; + + /* Write to a pipe so the kernel must dereference the buffer pointer. */ + ret = pipe(pipefd); + if (ret) + return 1; + + ksft_print_header(); + + for (int i = 0; i < ARRAY_SIZE(tests); i++) + plan += tests[i].nr_tests; + + ksft_set_plan(plan); + + for (int i = 0; i < ARRAY_SIZE(tests); i++) + tests[i].test_fn(); + + ksft_finished(); +} From 3c2e0aff7b4f03fbc11b7d63c8db5b94a48978cf Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:49 -0700 Subject: [PATCH 21/38] riscv: hwprobe: Export the Supm ISA extension Supm is a virtual ISA extension defined in the RISC-V Pointer Masking specification, which indicates that pointer masking is available in U-mode. It can be provided by either Smnpm or Ssnpm, depending on which mode the kernel runs in. Userspace should not care about this distinction, so export Supm instead of either underlying extension. Hide the extension if the kernel was compiled without support for the pointer masking prctl() interface. Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-9-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- Documentation/arch/riscv/hwprobe.rst | 3 +++ arch/riscv/include/uapi/asm/hwprobe.h | 1 + arch/riscv/kernel/sys_hwprobe.c | 3 +++ 3 files changed, 7 insertions(+) diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst index 85b709257918..b9aec2e5bbd4 100644 --- a/Documentation/arch/riscv/hwprobe.rst +++ b/Documentation/arch/riscv/hwprobe.rst @@ -239,6 +239,9 @@ The following keys are defined: ratified in commit 98918c844281 ("Merge pull request #1217 from riscv/zawrs") of riscv-isa-manual. + * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as + defined in version 1.0 of the RISC-V Pointer Masking extensions. + * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: Deprecated. Returns similar values to :c:macro:`RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF`, but the key was mistakenly classified as a bitmask rather than a value. diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index 1e153cda57db..868ff41b93d6 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -72,6 +72,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) #define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) #define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c index cea0ca2bf2a2..0ac78e9f7c94 100644 --- a/arch/riscv/kernel/sys_hwprobe.c +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -150,6 +150,9 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, EXT_KEY(ZFH); EXT_KEY(ZFHMIN); } + + if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) + EXT_KEY(SUPM); #undef EXT_KEY } From 1851e7836212c76bebb6944bb1541ddcccbea535 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:50 -0700 Subject: [PATCH 22/38] RISC-V: KVM: Allow Smnpm and Ssnpm extensions for guests The interface for controlling pointer masking in VS-mode is henvcfg.PMM, which is part of the Ssnpm extension, even though pointer masking in HS-mode is provided by the Smnpm extension. 
As a result, emulating Smnpm in the guest requires (only) Ssnpm on the host. The guest configures Smnpm through the SBI Firmware Features extension, which KVM does not yet implement, so currently the ISA extension has no visible effect on the guest, and thus it cannot be disabled. Ssnpm is configured using the senvcfg CSR within the guest, so that extension cannot be hidden from the guest without intercepting writes to the CSR. Signed-off-by: Samuel Holland Reviewed-by: Anup Patel Link: https://lore.kernel.org/r/20241016202814.4061541-10-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/uapi/asm/kvm.h | 2 ++ arch/riscv/kvm/vcpu_onereg.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index e97db3296456..4f24201376b1 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -175,6 +175,8 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZCF, KVM_RISCV_ISA_EXT_ZCMOP, KVM_RISCV_ISA_EXT_ZAWRS, + KVM_RISCV_ISA_EXT_SMNPM, + KVM_RISCV_ISA_EXT_SSNPM, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index b319c4c13c54..5b68490ad9b7 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -34,9 +34,11 @@ static const unsigned long kvm_isa_ext_arr[] = { [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, /* Multi letter extensions (alphabetically sorted) */ + [KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM, KVM_ISA_EXT_ARR(SMSTATEEN), KVM_ISA_EXT_ARR(SSAIA), KVM_ISA_EXT_ARR(SSCOFPMF), + KVM_ISA_EXT_ARR(SSNPM), KVM_ISA_EXT_ARR(SSTC), KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), @@ -127,8 +129,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_C: case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: + case KVM_RISCV_ISA_EXT_SMNPM: /* There is not architectural config bit to disable sscofpmf completely */ case KVM_RISCV_ISA_EXT_SSCOFPMF: + case KVM_RISCV_ISA_EXT_SSNPM: case KVM_RISCV_ISA_EXT_SSTC: case KVM_RISCV_ISA_EXT_SVINVAL: case KVM_RISCV_ISA_EXT_SVNAPOT: From 036a1407b4d49790ca5b35436d02de62212bc790 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 16 Oct 2024 13:27:51 -0700 Subject: [PATCH 23/38] KVM: riscv: selftests: Add Smnpm and Ssnpm to get-reg-list test Add testing for the pointer masking extensions exposed to KVM guests. 
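As a rough illustration (not part of this patch), a VMM can query these registers through the ONE_REG interface. The sketch below assumes a 64-bit RISC-V host whose UAPI headers already define KVM_RISCV_ISA_EXT_SSNPM and an existing vCPU file descriptor; the exact register-ID composition should be checked against the KVM documentation:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: return 1 if Ssnpm is exposed to the vCPU, 0 if not, -1 on error. */
static int ssnpm_exposed(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
		      KVM_RISCV_ISA_EXT_SSNPM,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	return val != 0;
}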
Reviewed-by: Anup Patel Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20241016202814.4061541-11-samuel.holland@sifive.com Signed-off-by: Palmer Dabbelt --- tools/testing/selftests/kvm/riscv/get-reg-list.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index 8e34f7fa44e9..54ab484d0000 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -41,9 +41,11 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMNPM: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSCOFPMF: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSNPM: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT: @@ -414,9 +416,11 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off) KVM_ISA_EXT_ARR(I), KVM_ISA_EXT_ARR(M), KVM_ISA_EXT_ARR(V), + KVM_ISA_EXT_ARR(SMNPM), KVM_ISA_EXT_ARR(SMSTATEEN), KVM_ISA_EXT_ARR(SSAIA), KVM_ISA_EXT_ARR(SSCOFPMF), + KVM_ISA_EXT_ARR(SSNPM), KVM_ISA_EXT_ARR(SSTC), KVM_ISA_EXT_ARR(SVINVAL), KVM_ISA_EXT_ARR(SVNAPOT), @@ -946,8 +950,10 @@ KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA); KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F); KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D); KVM_ISA_EXT_SIMPLE_CONFIG(h, H); +KVM_ISA_EXT_SIMPLE_CONFIG(smnpm, SMNPM); KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN); KVM_ISA_EXT_SIMPLE_CONFIG(sscofpmf, SSCOFPMF); +KVM_ISA_EXT_SIMPLE_CONFIG(ssnpm, SSNPM); KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC); KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL); KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT); @@ -1009,8 +1015,10 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_fp_f, &config_fp_d, &config_h, + &config_smnpm, &config_smstateen, &config_sscofpmf, + &config_ssnpm, &config_sstc, &config_svinval, &config_svnapot, From 010e12aa4925b36700ebacb763a7e6cfd771d9a2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:41 +0100 Subject: [PATCH 24/38] riscv: Move cpufeature.h macros into their own header asm/cmpxchg.h will soon need riscv_has_extension_unlikely() macros and then needs to include asm/cpufeature.h which introduces a lot of header circular dependencies. So move the riscv_has_extension_XXX() macros into their own header which prevents such circular dependencies by including a restricted number of headers. 
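As a rough illustration (not part of this patch), a low-level header that only needs the extension checks can now include the new header directly instead of asm/cpufeature.h; have_zacas() is a made-up name, and the pattern mirrors what asm/cmpxchg.h does later in this series:

#include <asm/cpufeature-macros.h>

/* Sketch: extension check usable from headers that must stay lightweight. */
static __always_inline bool have_zacas(void)
{
	return IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
	       riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS);
}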
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature-macros.h | 66 ++++++++++++++++++++++ arch/riscv/include/asm/cpufeature.h | 61 ++------------------ 2 files changed, 70 insertions(+), 57 deletions(-) create mode 100644 arch/riscv/include/asm/cpufeature-macros.h diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h new file mode 100644 index 000000000000..a8103edbf51f --- /dev/null +++ b/arch/riscv/include/asm/cpufeature-macros.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022-2024 Rivos, Inc + */ + +#ifndef _ASM_CPUFEATURE_MACROS_H +#define _ASM_CPUFEATURE_MACROS_H + +#include +#include + +#define STANDARD_EXT 0 + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + +static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_no); + + return true; +l_no: + return false; +} + +static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, + const unsigned long ext) +{ + asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) + : + : [vendor] "i" (vendor), [ext] "i" (ext) + : + : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_unlikely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +static __always_inline bool riscv_has_extension_likely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) + return __riscv_has_extension_likely(STANDARD_EXT, ext); + + return __riscv_isa_extension_available(NULL, ext); +} + +#endif /* _ASM_CPUFEATURE_MACROS_H */ diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 45f9c1171a48..87ed88fc950d 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -8,9 +8,11 @@ #include #include +#include +#include +#include #include -#include -#include +#include /* * These are probed via a device_initcall(), via either the SBI or directly @@ -103,61 +105,6 @@ extern const size_t riscv_isa_ext_count; extern bool riscv_isa_fallback; unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); - -#define STANDARD_EXT 0 - -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); -#define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) - -static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, - const unsigned long ext) -{ - asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) - : - : [vendor] "i" (vendor), [ext] "i" (ext) - : - : l_no); - - return true; -l_no: - return false; -} - -static __always_inline bool __riscv_has_extension_unlikely(const 
unsigned long vendor, - const unsigned long ext) -{ - asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) - : - : [vendor] "i" (vendor), [ext] "i" (ext) - : - : l_yes); - - return false; -l_yes: - return true; -} - -static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) - return __riscv_has_extension_unlikely(STANDARD_EXT, ext); - - return __riscv_isa_extension_available(NULL, ext); -} - -static __always_inline bool riscv_has_extension_likely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) - return __riscv_has_extension_likely(STANDARD_EXT, ext); - - return __riscv_isa_extension_available(NULL, ext); -} - static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) { compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); From af042c457db07db4bc1baa5c22d089cab69cfc5b Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:42 +0100 Subject: [PATCH 25/38] riscv: Do not fail to build on byte/halfword operations with Zawrs riscv does not have lr instructions on byte and halfword but the qspinlock implementation actually uses such atomics provided by the Zabha extension, so those sizes are legitimate. Then instead of failing to build, just fallback to the !Zawrs path. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cmpxchg.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index ebbce134917c..ac1d7df898ef 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -245,6 +245,11 @@ static __always_inline void __cmpwait(volatile void *ptr, : : : : no_zawrs); switch (size) { + case 1: + fallthrough; + case 2: + /* RISC-V doesn't have lr instructions on byte and half-word. */ + goto no_zawrs; case 4: asm volatile( " lr.w %0, %1\n" From 38acdee32d23f789e866488c99867fd497d43c86 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:43 +0100 Subject: [PATCH 26/38] riscv: Implement cmpxchg32/64() using Zacas This adds runtime support for Zacas in cmpxchg operations. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-4-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 16 +++++++++++ arch/riscv/Makefile | 3 ++ arch/riscv/include/asm/cmpxchg.h | 48 +++++++++++++++++++++----------- 3 files changed, 50 insertions(+), 17 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 62545946ecf4..3542efe3088b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -632,6 +632,22 @@ config RISCV_ISA_ZAWRS use of these instructions in the kernel when the Zawrs extension is detected at boot. 
+config TOOLCHAIN_HAS_ZACAS + bool + default y + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas) + depends on AS_HAS_OPTION_ARCH + +config RISCV_ISA_ZACAS + bool "Zacas extension support for atomic CAS" + depends on TOOLCHAIN_HAS_ZACAS + depends on RISCV_ALTERNATIVE + default y + help + Enable the use of the Zacas ISA-extension to implement kernel atomic + cmpxchg operations when it is detected at boot. + If you don't know what to do here, say Y. config TOOLCHAIN_HAS_ZBB diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index d469db9f46f4..3700a1574413 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -82,6 +82,9 @@ else riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei endif +# Check if the toolchain supports Zacas +riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas + # Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index ac1d7df898ef..39c1daf39f6a 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -12,6 +12,7 @@ #include #include #include +#include #define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ ({ \ @@ -137,24 +138,37 @@ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ }) -#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \ +#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ ({ \ - register unsigned int __rc; \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr" lr_sfx " %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc" sc_sfx " %1, %z4, %2\n" \ - " bnez %1, 0b\n" \ - append \ - "1:\n" \ - : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ - : "rJ" (co o), "rJ" (n) \ - : "memory"); \ + __asm__ __volatile__ ( \ + prepend \ + " amocas" sc_cas_sfx " %0, %z2, %1\n" \ + append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + register unsigned int __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr" lr_sfx " %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + append \ + "1:\n" \ + : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ + : "rJ" (co o), "rJ" (n) \ + : "memory"); \ + } \ }) -#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \ +#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ __typeof__(*(__ptr)) __old = (old); \ @@ -164,15 +178,15 @@ switch (sizeof(*__ptr)) { \ case 1: \ case 2: \ - __arch_cmpxchg_masked(sc_sfx, prepend, append, \ + __arch_cmpxchg_masked(sc_cas_sfx, prepend, append, \ __ret, __ptr, __old, __new); \ break; \ case 4: \ - __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \ + __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \ + __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ __ret, __ptr, /**/, __old, __new); \ break; \ default: \ From 51624ddcf59dd78c810fd7da768d688e193b42d6 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:44 
+0100 Subject: [PATCH 27/38] dt-bindings: riscv: Add Zabha ISA extension description Add description for the Zabha ISA extension which was ratified in April 2024. Signed-off-by: Alexandre Ghiti Reviewed-by: Guo Ren Acked-by: Conor Dooley Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-5-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- Documentation/devicetree/bindings/riscv/extensions.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index 2cf2026cff57..db062107823b 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -178,6 +178,12 @@ properties: as ratified at commit 4a69197e5617 ("Update to ratified state") of riscv-svvptc. + - const: zabha + description: | + The Zabha extension for Byte and Halfword Atomic Memory Operations + as ratified at commit 49f49c842ff9 ("Update to Rafified state") of + riscv-zabha. + - const: zacas description: | The Zacas extension for Atomic Compare-and-Swap (CAS) instructions From 1658ef4314b37ff4858a6c207646ff9d280ca4f7 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:45 +0100 Subject: [PATCH 28/38] riscv: Implement cmpxchg8/16() using Zabha This adds runtime support for Zabha in cmpxchg8/16() operations. Note that in the absence of Zacas support in the toolchain, CAS instructions from Zabha won't be used. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-6-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 18 ++++++++ arch/riscv/Makefile | 3 ++ arch/riscv/include/asm/cmpxchg.h | 78 ++++++++++++++++++++------------ arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpufeature.c | 1 + 5 files changed, 72 insertions(+), 29 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 3542efe3088b..668be90a42e4 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -632,6 +632,24 @@ config RISCV_ISA_ZAWRS use of these instructions in the kernel when the Zawrs extension is detected at boot. +config TOOLCHAIN_HAS_ZABHA + bool + default y + depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) + depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) + depends on AS_HAS_OPTION_ARCH + +config RISCV_ISA_ZABHA + bool "Zabha extension support for atomic byte/halfword operations" + depends on TOOLCHAIN_HAS_ZABHA + depends on RISCV_ALTERNATIVE + default y + help + Enable the use of the Zabha ISA-extension to implement kernel + byte/halfword atomic memory operations when it is detected at boot. + + If you don't know what to do here, say Y. + config TOOLCHAIN_HAS_ZACAS bool default y diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 3700a1574413..9fe1ee740dda 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -85,6 +85,9 @@ endif # Check if the toolchain supports Zacas riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas +# Check if the toolchain supports Zabha +riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha + # Remove F,D,V from isa string for all. 
Keep extensions between "fd" and "v" by # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/') diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 39c1daf39f6a..1f4cd12e4664 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -108,34 +108,49 @@ * indicated by comparing RETURN with OLD. */ -#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __oldx = (ulong)(o) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z5\n" \ - " bne %1, %z3, 1f\n" \ - " and %1, %0, %z6\n" \ - " or %1, %1, %z4\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - "1:\n" \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" ((long)__oldx), "rJ" (__newx), \ - "rJ" (__mask), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ + r = o; \ + \ + __asm__ __volatile__ ( \ + prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __oldx = (ulong)(o) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ + " and %1, %0, %z6\n" \ + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ + "rJ" (__mask), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) #define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ @@ -177,8 +192,13 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ + prepend, append, \ + __ret, __ptr, __old, __new); \ + break; \ case 2: \ - __arch_cmpxchg_masked(sc_cas_sfx, prepend, append, \ + __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ + prepend, append, \ __ret, __ptr, __old, __new); \ break; \ case 4: \ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 46d9de54179e..74bcb0e2bd1f 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -93,6 +93,7 @@ #define RISCV_ISA_EXT_ZCMOP 84 #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 +#define RISCV_ISA_EXT_ZABHA 87 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 3a8eeaa9310c..5e743d8d34f5 100644 --- a/arch/riscv/kernel/cpufeature.c +++ 
b/arch/riscv/kernel/cpufeature.c @@ -322,6 +322,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), + __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), From 6116e22ef33a8239f3d53bb25377e9ed733c4176 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:46 +0100 Subject: [PATCH 29/38] riscv: Improve zacas fully-ordered cmpxchg() The current fully-ordered cmpxchgXX() implementation results in: amocas.X.rl a5,a4,(s1) fence rw,rw This provides enough sync but we can actually use the following better mapping instead: amocas.X.aqrl a5,a4,(s1) Suggested-by: Andrea Parri Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-7-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cmpxchg.h | 92 ++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 28 deletions(-) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 1f4cd12e4664..052418aba11a 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -107,8 +107,10 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ - -#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ +#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, o, n) \ ({ \ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ @@ -117,9 +119,9 @@ r = o; \ \ __asm__ __volatile__ ( \ - prepend \ + cas_prepend \ " amocas" cas_sfx " %0, %z2, %1\n" \ - append \ + cas_append \ : "+&r" (r), "+A" (*(p)) \ : "rJ" (n) \ : "memory"); \ @@ -134,7 +136,7 @@ ulong __rc; \ \ __asm__ __volatile__ ( \ - prepend \ + sc_prepend \ "0: lr.w %0, %2\n" \ " and %1, %0, %z5\n" \ " bne %1, %z3, 1f\n" \ @@ -142,7 +144,7 @@ " or %1, %1, %z4\n" \ " sc.w" sc_sfx " %1, %1, %2\n" \ " bnez %1, 0b\n" \ - append \ + sc_append \ "1:\n" \ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ : "rJ" ((long)__oldx), "rJ" (__newx), \ @@ -153,16 +155,19 @@ } \ }) -#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ +#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + r, p, co, o, n) \ ({ \ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ r = o; \ \ __asm__ __volatile__ ( \ - prepend \ - " amocas" sc_cas_sfx " %0, %z2, %1\n" \ - append \ + cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ + cas_append \ : "+&r" (r), "+A" (*(p)) \ : "rJ" (n) \ : "memory"); \ @@ -170,12 +175,12 @@ register unsigned int __rc; \ \ __asm__ __volatile__ ( \ - prepend \ + sc_prepend \ "0: lr" lr_sfx " %0, %2\n" \ " bne %0, %z3, 1f\n" \ - " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " sc" sc_sfx " %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - append \ + sc_append \ "1:\n" \ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ : "rJ" (co o), "rJ" (n) \ @@ -183,7 +188,9 @@ } \ }) -#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ +#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append) \ ({ \ 
__typeof__(ptr) __ptr = (ptr); \ __typeof__(*(__ptr)) __old = (old); \ @@ -192,22 +199,28 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ - __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ - prepend, append, \ - __ret, __ptr, __old, __new); \ + __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ case 2: \ - __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ - prepend, append, \ - __ret, __ptr, __old, __new); \ + __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, __old, __new); \ break; \ case 4: \ - __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ - __ret, __ptr, (long), __old, __new); \ + __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ - __ret, __ptr, /**/, __old, __new); \ + __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ + sc_prepend, sc_append, \ + cas_prepend, cas_append, \ + __ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ @@ -215,17 +228,40 @@ (__typeof__(*(__ptr)))__ret; \ }) +/* + * These macros are here to improve the readability of the arch_cmpxchg_XXX() + * macros. + */ +#define SC_SFX(x) x +#define CAS_SFX(x) x +#define SC_PREPEND(x) x +#define SC_APPEND(x) x +#define CAS_PREPEND(x) x +#define CAS_APPEND(x) x + #define arch_cmpxchg_relaxed(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(""), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_acquire(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) #define arch_cmpxchg_release(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(""), CAS_SFX(""), \ + SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ + CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) #define arch_cmpxchg(ptr, o, n) \ - _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + _arch_cmpxchg((ptr), (o), (n), \ + SC_SFX(".rl"), CAS_SFX(".aqrl"), \ + SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ + CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_local(ptr, o, n) \ arch_cmpxchg_relaxed((ptr), (o), (n)) From f7bd2be7663c7de1dde27dadd352b2c3f4e19106 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:47 +0100 Subject: [PATCH 30/38] riscv: Implement arch_cmpxchg128() using Zacas Now that Zacas is supported in the kernel, let's use the double word atomic version of amocas to improve the SLUB allocator. Note that we have to select fixed registers, otherwise gcc fails to pick even registers and then produces a reserved encoding which fails to assemble. 
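As a quick illustration of what this enables (a hypothetical caller sketch, not code from this series: struct pair, update_pair and the direct use of system_has_cmpxchg128()/arch_cmpxchg128() are assumptions made for the example), a 16-byte compare-and-swap lets a pointer and an adjacent counter be updated atomically, which is the pattern the SLUB freelist fast path relies on:

  struct pair {
  	u64 lo;
  	u64 hi;
  } __aligned(16);

  static bool update_pair(struct pair *p, struct pair old, struct pair new)
  {
  	u128 o = ((u128)old.hi << 64) | old.lo;
  	u128 n = ((u128)new.hi << 64) | new.lo;

  	if (!system_has_cmpxchg128())
  		return false;	/* caller must provide a non-CAS fallback */

  	return arch_cmpxchg128((u128 *)p, o, n) == o;
  }

In-tree users would normally go through the generic cmpxchg128()/try_cmpxchg128() wrappers rather than the arch_ macro directly; the sketch only shows the shape of the operation.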
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-8-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/cmpxchg.h | 38 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 668be90a42e4..093ee6537331 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -115,6 +115,7 @@ config RISCV select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO select HARDIRQS_SW_RESEND select HAS_IOPORT if MMU + select HAVE_ALIGNED_STRUCT_PAGE select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 052418aba11a..f95929f538b2 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -296,6 +296,44 @@ arch_cmpxchg_release((ptr), (o), (n)); \ }) +#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS) + +#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS) + +union __u128_halves { + u128 full; + struct { + u64 low, high; + }; +}; + +#define __arch_cmpxchg128(p, o, n, cas_sfx) \ +({ \ + __typeof__(*(p)) __o = (o); \ + union __u128_halves __hn = { .full = (n) }; \ + union __u128_halves __ho = { .full = (__o) }; \ + register unsigned long t1 asm ("t1") = __hn.low; \ + register unsigned long t2 asm ("t2") = __hn.high; \ + register unsigned long t3 asm ("t3") = __ho.low; \ + register unsigned long t4 asm ("t4") = __ho.high; \ + \ + __asm__ __volatile__ ( \ + " amocas.q" cas_sfx " %0, %z3, %2" \ + : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \ + : "rJ" (t1), "rJ" (t2) \ + : "memory"); \ + \ + ((u128)t4 << 64) | t3; \ +}) + +#define arch_cmpxchg128(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), ".aqrl") + +#define arch_cmpxchg128_local(ptr, o, n) \ + __arch_cmpxchg128((ptr), (o), (n), "") + +#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */ + #ifdef CONFIG_RISCV_ISA_ZAWRS /* * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to From 97ddab7fbea8fceb044108b64ba2ee2c96ff8dab Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:48 +0100 Subject: [PATCH 31/38] riscv: Implement xchg8/16() using Zabha This adds runtime support for Zabha in xchg8/16() operations. 
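For context, a minimal sketch of the fast path (illustration only; it assumes a Zabha-capable assembler, and xchg8_zabha is a made-up name): with Zabha a one-byte exchange becomes a single amoswap.b on the byte itself, instead of the lr.w/sc.w loop that masks and merges within the containing aligned 32-bit word, which remains as the fallback in the diff below.

  static inline u8 xchg8_zabha(u8 *p, u8 val)
  {
  	u8 ret;

  	__asm__ __volatile__ (
  		"	amoswap.b.aqrl	%0, %2, %1\n"
  		: "=r" (ret), "+A" (*p)
  		: "r" (val)
  		: "memory");

  	return ret;
  }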
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-9-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cmpxchg.h | 65 ++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index f95929f538b2..4cadc56220fe 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -14,29 +14,41 @@ #include #include -#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ -({ \ - u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ - ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ - ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ - << __s; \ - ulong __newx = (ulong)(n) << __s; \ - ulong __retx; \ - ulong __rc; \ - \ - __asm__ __volatile__ ( \ - prepend \ - "0: lr.w %0, %2\n" \ - " and %1, %0, %z4\n" \ - " or %1, %1, %z3\n" \ - " sc.w" sc_sfx " %1, %1, %2\n" \ - " bnez %1, 0b\n" \ - append \ - : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ - : "rJ" (__newx), "rJ" (~__mask) \ - : "memory"); \ - \ - r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ +#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ + swap_append, r, p, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \ + __asm__ __volatile__ ( \ + prepend \ + " amoswap" swap_sfx " %0, %z2, %1\n" \ + swap_append \ + : "=&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ + } else { \ + u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ + ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ + ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ + << __s; \ + ulong __newx = (ulong)(n) << __s; \ + ulong __retx; \ + ulong __rc; \ + \ + __asm__ __volatile__ ( \ + prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z4\n" \ + " or %1, %1, %z3\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ + sc_append \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" (__newx), "rJ" (~__mask) \ + : "memory"); \ + \ + r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + } \ }) #define __arch_xchg(sfx, prepend, append, r, p, n) \ @@ -59,8 +71,13 @@ \ switch (sizeof(*__ptr)) { \ case 1: \ + __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \ + prepend, sc_append, swap_append, \ + __ret, __ptr, __new); \ + break; \ case 2: \ - __arch_xchg_masked(sc_sfx, prepend, sc_append, \ + __arch_xchg_masked(sc_sfx, ".h" swap_sfx, \ + prepend, sc_append, swap_append, \ __ret, __ptr, __new); \ break; \ case 4: \ From cbe82e140bb76e1aa9f808cc841654a25b70d4e5 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Sun, 3 Nov 2024 15:51:49 +0100 Subject: [PATCH 32/38] asm-generic: ticket-lock: Reuse arch_spinlock_t of qspinlock The arch_spinlock_t of qspinlock has contained the atomic_t val, which satisfies the ticket-lock requirement. Thus, unify the arch_spinlock_t into qspinlock_types.h. This is the preparation for the next combo spinlock. 
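To make the reuse concrete, here is a simplified sketch of the shared layout (the real struct qspinlock wraps the atomic_t in a union with byte and halfword views; the helper names below are made up): the ticket lock only needs a single 32-bit atomic, with the next ticket in the upper halfword and the current owner in the lower halfword, so the qspinlock's val member is sufficient.

  typedef struct qspinlock {
  	atomic_t val;	/* ticket view: next << 16 | owner */
  } arch_spinlock_t;

  static inline u16 ticket_next(u32 val)
  {
  	return val >> 16;
  }

  static inline u16 ticket_owner(u32 val)
  {
  	return val & 0xffff;
  }

  static inline bool ticket_is_unlocked(arch_spinlock_t *lock)
  {
  	u32 val = atomic_read(&lock->val);

  	return ticket_next(val) == ticket_owner(val);
  }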
Reviewed-by: Leonardo Bras Suggested-by: Arnd Bergmann Link: https://lore.kernel.org/linux-riscv/CAK8P3a2rnz9mQqhN6-e0CGUUv9rntRELFdxt_weiD7FxH7fkfQ@mail.gmail.com/ Signed-off-by: Guo Ren Signed-off-by: Guo Ren Acked-by: Peter Zijlstra (Intel) Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-10-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- include/asm-generic/spinlock.h | 14 +++++++------- include/asm-generic/spinlock_types.h | 12 ++---------- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 90803a826ba0..4773334ee638 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -32,7 +32,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { - u32 val = atomic_fetch_add(1<<16, lock); + u32 val = atomic_fetch_add(1<<16, &lock->val); u16 ticket = val >> 16; if (ticket == (u16)val) @@ -46,31 +46,31 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) * have no outstanding writes due to the atomic_fetch_add() the extra * orderings are free. */ - atomic_cond_read_acquire(lock, ticket == (u16)VAL); + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); smp_mb(); } static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) { - u32 old = atomic_read(lock); + u32 old = atomic_read(&lock->val); if ((old >> 16) != (old & 0xffff)) return false; - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ } static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); smp_store_release(ptr, (u16)val + 1); } static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - u32 val = lock.counter; + u32 val = lock.val.counter; return ((val >> 16) == (val & 0xffff)); } @@ -84,7 +84,7 @@ static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) { - u32 val = atomic_read(lock); + u32 val = atomic_read(&lock->val); return (s16)((val >> 16) - (val & 0xffff)) > 1; } diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h index 8962bb730945..f534aa5de394 100644 --- a/include/asm-generic/spinlock_types.h +++ b/include/asm-generic/spinlock_types.h @@ -3,15 +3,7 @@ #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H #define __ASM_GENERIC_SPINLOCK_TYPES_H -#include -typedef atomic_t arch_spinlock_t; - -/* - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the - * include. - */ -#include - -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) +#include +#include #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ From 22c33321e260c8b4c1877b2cc0c4e26a0c74c23f Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Sun, 3 Nov 2024 15:51:50 +0100 Subject: [PATCH 33/38] asm-generic: ticket-lock: Add separate ticket-lock.h Add a separate ticket-lock.h to include multiple spinlock versions and select one at compile time or runtime. 
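With the ticket lock in its own header, an architecture that decides at build time can pick an implementation from its asm/spinlock.h alone. A minimal sketch of that pattern follows (a hypothetical arch header, not taken from this series; the RISC-V variant added later in the series is more involved because it also switches at runtime):

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_H
  #define __ASM_SPINLOCK_H

  #ifdef CONFIG_QUEUED_SPINLOCKS
  #include <asm-generic/qspinlock.h>
  #else
  #include <asm-generic/ticket_spinlock.h>
  #endif

  #include <asm/qrwlock.h>	/* rwlocks are unaffected by this choice */

  #endif /* __ASM_SPINLOCK_H */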
Reviewed-by: Leonardo Bras Suggested-by: Arnd Bergmann Link: https://lore.kernel.org/linux-riscv/CAK8P3a2rnz9mQqhN6-e0CGUUv9rntRELFdxt_weiD7FxH7fkfQ@mail.gmail.com/ Signed-off-by: Guo Ren Signed-off-by: Guo Ren Acked-by: Peter Zijlstra (Intel) Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-11-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- include/asm-generic/spinlock.h | 87 +--------------------- include/asm-generic/ticket_spinlock.h | 103 ++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 86 deletions(-) create mode 100644 include/asm-generic/ticket_spinlock.h diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 4773334ee638..970590baf61b 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,94 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * 'Generic' ticket-lock implementation. - * - * It relies on atomic_fetch_add() having well defined forward progress - * guarantees under contention. If your architecture cannot provide this, stick - * to a test-and-set lock. - * - * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a - * sub-word of the value. This is generally true for anything LL/SC although - * you'd be hard pressed to find anything useful in architecture specifications - * about this. If your architecture cannot do this you might be better off with - * a test-and-set. - * - * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence - * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with - * a full fence after the spin to upgrade the otherwise-RCpc - * atomic_cond_read_acquire(). - * - * The implementation uses smp_cond_load_acquire() to spin, so if the - * architecture has WFE like instructions to sleep instead of poll for word - * modifications be sure to implement that (see ARM64 for example). - * - */ - #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H -#include -#include - -static __always_inline void arch_spin_lock(arch_spinlock_t *lock) -{ - u32 val = atomic_fetch_add(1<<16, &lock->val); - u16 ticket = val >> 16; - - if (ticket == (u16)val) - return; - - /* - * atomic_cond_read_acquire() is RCpc, but rather than defining a - * custom cond_read_rcsc() here we just emit a full fence. We only - * need the prior reads before subsequent writes ordering from - * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we - * have no outstanding writes due to the atomic_fetch_add() the extra - * orderings are free. 
- */ - atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); - smp_mb(); -} - -static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) -{ - u32 old = atomic_read(&lock->val); - - if ((old >> 16) != (old & 0xffff)) - return false; - - return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ -} - -static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(&lock->val); - - smp_store_release(ptr, (u16)val + 1); -} - -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - u32 val = lock.val.counter; - - return ((val >> 16) == (val & 0xffff)); -} - -static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - arch_spinlock_t val = READ_ONCE(*lock); - - return !arch_spin_value_unlocked(val); -} - -static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - u32 val = atomic_read(&lock->val); - - return (s16)((val >> 16) - (val & 0xffff)) > 1; -} - +#include #include #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h new file mode 100644 index 000000000000..cfcff22b37b3 --- /dev/null +++ b/include/asm-generic/ticket_spinlock.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * 'Generic' ticket-lock implementation. + * + * It relies on atomic_fetch_add() having well defined forward progress + * guarantees under contention. If your architecture cannot provide this, stick + * to a test-and-set lock. + * + * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a + * sub-word of the value. This is generally true for anything LL/SC although + * you'd be hard pressed to find anything useful in architecture specifications + * about this. If your architecture cannot do this you might be better off with + * a test-and-set. + * + * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence + * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with + * a full fence after the spin to upgrade the otherwise-RCpc + * atomic_cond_read_acquire(). + * + * The implementation uses smp_cond_load_acquire() to spin, so if the + * architecture has WFE like instructions to sleep instead of poll for word + * modifications be sure to implement that (see ARM64 for example). + * + */ + +#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H +#define __ASM_GENERIC_TICKET_SPINLOCK_H + +#include +#include + +static __always_inline void ticket_spin_lock(arch_spinlock_t *lock) +{ + u32 val = atomic_fetch_add(1<<16, &lock->val); + u16 ticket = val >> 16; + + if (ticket == (u16)val) + return; + + /* + * atomic_cond_read_acquire() is RCpc, but rather than defining a + * custom cond_read_rcsc() here we just emit a full fence. We only + * need the prior reads before subsequent writes ordering from + * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we + * have no outstanding writes due to the atomic_fetch_add() the extra + * orderings are free. 
+ */ + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); + smp_mb(); +} + +static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock) +{ + u32 old = atomic_read(&lock->val); + + if ((old >> 16) != (old & 0xffff)) + return false; + + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ +} + +static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock) +{ + u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + u32 val = atomic_read(&lock->val); + + smp_store_release(ptr, (u16)val + 1); +} + +static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.val.counter; + + return ((val >> 16) == (val & 0xffff)); +} + +static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock) +{ + arch_spinlock_t val = READ_ONCE(*lock); + + return !ticket_spin_value_unlocked(val); +} + +static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) +{ + u32 val = atomic_read(&lock->val); + + return (s16)((val >> 16) - (val & 0xffff)) > 1; +} + +/* + * Remapping spinlock architecture specific functions to the corresponding + * ticket spinlock functions. + */ +#define arch_spin_is_locked(l) ticket_spin_is_locked(l) +#define arch_spin_is_contended(l) ticket_spin_is_contended(l) +#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l) +#define arch_spin_lock(l) ticket_spin_lock(l) +#define arch_spin_trylock(l) ticket_spin_trylock(l) +#define arch_spin_unlock(l) ticket_spin_unlock(l) + +#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ From 2d36fe89d872f1e655670280ce13a8dbe9d366a7 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:51 +0100 Subject: [PATCH 34/38] riscv: Add ISA extension parsing for Ziccrse Add support to parse the Ziccrse string in the riscv,isa string. Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-12-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpufeature.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 74bcb0e2bd1f..0aa3c3f5e682 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -94,6 +94,7 @@ #define RISCV_ISA_EXT_ZAWRS 85 #define RISCV_ISA_EXT_SVVPTC 86 #define RISCV_ISA_EXT_ZABHA 87 +#define RISCV_ISA_EXT_ZICCRSE 88 #define RISCV_ISA_EXT_XLINUXENVCFG 127 diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 5e743d8d34f5..5f453a039ec9 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -314,6 +314,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { riscv_ext_zicbom_validate), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate), + __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), From 447b2afbcde16be43c9459507f48f5c602c121c0 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:52 +0100 Subject: [PATCH 35/38] dt-bindings: riscv: Add Ziccrse ISA extension description Add description for the Ziccrse ISA extension which was ratified in the riscv profiles specification v1.0. 
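Ziccrse adds no instructions; it only guarantees forward progress for constrained LR/SC sequences, i.e. short loops of the shape below (illustration only, RV64 inline asm, lr_sc_swap is a made-up name). That is the property the queued spinlock relies on when it is built from LR/SC rather than from CAS.

  static inline u32 lr_sc_swap(u32 *p, u32 newval)
  {
  	u32 old, tmp;

  	__asm__ __volatile__ (
  		"0:	lr.w	%0, %2\n"
  		"	sc.w	%1, %3, %2\n"
  		"	bnez	%1, 0b\n"
  		: "=&r" (old), "=&r" (tmp), "+A" (*p)
  		: "r" (newval)
  		: "memory");

  	return old;
  }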
Signed-off-by: Alexandre Ghiti Reviewed-by: Guo Ren Acked-by: Conor Dooley Reviewed-by: Andrew Jones Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-13-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- Documentation/devicetree/bindings/riscv/extensions.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml index db062107823b..35a9ad1d7e63 100644 --- a/Documentation/devicetree/bindings/riscv/extensions.yaml +++ b/Documentation/devicetree/bindings/riscv/extensions.yaml @@ -296,6 +296,12 @@ properties: in commit 64074bc ("Update version numbers for Zfh/Zfinx") of riscv-isa-manual. + - const: ziccrse + description: + The standard Ziccrse extension which provides forward progress + guarantee on LR/SC sequences, as ratified in commit b1d806605f87 + ("Updated to ratified state.") of the riscv profiles specification. + - const: zk description: The standard Zk Standard Scalar cryptography extension as ratified From ab83647fadae2f1f723119dc066b39a461d6d288 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Sun, 3 Nov 2024 15:51:53 +0100 Subject: [PATCH 36/38] riscv: Add qspinlock support In order to produce a generic kernel, a user can select CONFIG_COMBO_SPINLOCKS which will fallback at runtime to the ticket spinlock implementation if Zabha or Ziccrse are not present. Note that we can't use alternatives here because the discovery of extensions is done too late and we need to start with the qspinlock implementation because the ticket spinlock implementation would pollute the spinlock value, so let's use static keys. This is largely based on Guo's work and Leonardo reviews at [1]. Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@kernel.org/ [1] Signed-off-by: Guo Ren Signed-off-by: Alexandre Ghiti Reviewed-by: Andrea Parri Link: https://lore.kernel.org/r/20241103145153.105097-14-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- .../locking/queued-spinlocks/arch-support.txt | 2 +- arch/riscv/Kconfig | 34 ++++++++++++++ arch/riscv/include/asm/Kbuild | 4 +- arch/riscv/include/asm/spinlock.h | 47 +++++++++++++++++++ arch/riscv/kernel/setup.c | 37 +++++++++++++++ include/asm-generic/qspinlock.h | 2 + include/asm-generic/ticket_spinlock.h | 2 + 7 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 arch/riscv/include/asm/spinlock.h diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt index 22f2990392ff..cf26042480e2 100644 --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt @@ -20,7 +20,7 @@ | openrisc: | ok | | parisc: | TODO | | powerpc: | ok | - | riscv: | TODO | + | riscv: | ok | | s390: | TODO | | sh: | TODO | | sparc: | ok | diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 093ee6537331..f5698ecc5ccc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -82,6 +82,7 @@ config RISCV select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if RISCV_M_MODE @@ -507,6 +508,39 @@ config NODES_SHIFT Specify the maximum number of NUMA Nodes available on the target system. 
Increases memory reserved to accommodate various tables. +choice + prompt "RISC-V spinlock type" + default RISCV_COMBO_SPINLOCKS + +config RISCV_TICKET_SPINLOCKS + bool "Using ticket spinlock" + +config RISCV_QUEUED_SPINLOCKS + bool "Using queued spinlock" + depends on SMP && MMU && NONPORTABLE + select ARCH_USE_QUEUED_SPINLOCKS + help + The queued spinlock implementation requires the forward progress + guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or + LR/SC with Ziccrse provide such guarantee. + + Select this if and only if Zabha or Ziccrse is available on your + platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms + without one of those extensions. + + If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks + when supported and otherwise ticket spinlocks. + +config RISCV_COMBO_SPINLOCKS + bool "Using combo spinlock" + depends on SMP && MMU + select ARCH_USE_QUEUED_SPINLOCKS + help + Embed both queued spinlock and ticket lock so that the spinlock + implementation can be chosen at runtime. + +endchoice + config RISCV_ALTERNATIVE bool depends on !XIP_KERNEL diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1461af12da6e..de13d5a234f8 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -6,10 +6,12 @@ generic-y += early_ioremap.h generic-y += flat.h generic-y += kvm_para.h generic-y += mmzone.h +generic-y += mcs_spinlock.h generic-y += parport.h -generic-y += spinlock.h generic-y += spinlock_types.h +generic-y += ticket_spinlock.h generic-y += qrwlock.h generic-y += qrwlock_types.h +generic-y += qspinlock.h generic-y += user.h generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h new file mode 100644 index 000000000000..e5121b89acea --- /dev/null +++ b/arch/riscv/include/asm/spinlock.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_RISCV_SPINLOCK_H +#define __ASM_RISCV_SPINLOCK_H + +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS +#define _Q_PENDING_LOOPS (1 << 9) + +#define __no_arch_spinlock_redefine +#include +#include +#include + +/* + * TODO: Use an alternative instead of a static key when we are able to parse + * the extensions string earlier in the boot process. 
+ */ +DECLARE_STATIC_KEY_TRUE(qspinlock_key); + +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ +static __always_inline type arch_spin_##op(type_lock lock) \ +{ \ + if (static_branch_unlikely(&qspinlock_key)) \ + return queued_spin_##op(lock); \ + return ticket_spin_##op(lock); \ +} + +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) + +#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) + +#include + +#else + +#include + +#endif + +#include + +#endif /* __ASM_RISCV_SPINLOCK_H */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index a2cde65b69e9..438e4f6ad2ad 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -244,6 +244,42 @@ static void __init parse_dtb(void) #endif } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) +DEFINE_STATIC_KEY_TRUE(qspinlock_key); +EXPORT_SYMBOL(qspinlock_key); +#endif + +static void __init riscv_spinlock_init(void) +{ + char *using_ext = NULL; + + if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) { + pr_info("Ticket spinlock: enabled\n"); + return; + } + + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && + riscv_isa_extension_available(NULL, ZABHA) && + riscv_isa_extension_available(NULL, ZACAS)) { + using_ext = "using Zabha"; + } else if (riscv_isa_extension_available(NULL, ZICCRSE)) { + using_ext = "using Ziccrse"; + } +#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) + else { + static_branch_disable(&qspinlock_key); + pr_info("Ticket spinlock: enabled\n"); + return; + } +#endif + + if (!using_ext) + pr_err("Queued spinlock without Zabha or Ziccrse"); + else + pr_info("Queued spinlock %s: enabled\n", using_ext); +} + extern void __init init_rt_signal_env(void); void __init setup_arch(char **cmdline_p) @@ -297,6 +333,7 @@ void __init setup_arch(char **cmdline_p) riscv_set_dma_cache_alignment(); riscv_user_isa_enable(); + riscv_spinlock_init(); } bool arch_cpu_is_hotpluggable(int cpu) diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 0655aa5b57b2..bf47cca2c375 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) } #endif +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h index cfcff22b37b3..325779970d8a 100644 --- a/include/asm-generic/ticket_spinlock.h +++ b/include/asm-generic/ticket_spinlock.h @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) return (s16)((val >> 16) - (val & 0xffff)) > 1; } +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * ticket spinlock functions. 
@@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) #define arch_spin_lock(l) ticket_spin_lock(l) #define arch_spin_trylock(l) ticket_spin_trylock(l) #define arch_spin_unlock(l) ticket_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ From 0eb512779d642b21ced83778287a0f7a3ca8f2a1 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Fri, 8 Nov 2024 15:47:36 -0800 Subject: [PATCH 37/38] riscv: Fix default misaligned access trap Commit d1703dc7bc8e ("RISC-V: Detect unaligned vector accesses supported") removed the default handlers for handle_misaligned_load() and handle_misaligned_store(). When the kernel is compiled without RISCV_SCALAR_MISALIGNED, these handlers are never defined, causing compilation errors. Signed-off-by: Charlie Jenkins Fixes: d1703dc7bc8e ("RISC-V: Detect unaligned vector accesses supported") Reviewed-by: Jesse Taube Link: https://lore.kernel.org/r/20241108-fix_handle_misaligned_load-v2-1-91d547ce64db@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/entry-common.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 7b32d2b08bb6..b28ccc6cdeea 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -25,7 +25,19 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); +#ifdef CONFIG_RISCV_MISALIGNED int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); +#else +static inline int handle_misaligned_load(struct pt_regs *regs) +{ + return -1; +} + +static inline int handle_misaligned_store(struct pt_regs *regs) +{ + return -1; +} +#endif #endif /* _ASM_RISCV_ENTRY_COMMON_H */ From 8d4f1e05ff821a5d59116ab8c3a30fcae81d8597 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 26 Nov 2024 06:32:50 -0800 Subject: [PATCH 38/38] RISC-V: Remove unnecessary include from compat.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without this I get a bunch of build errors like In file included from ./include/linux/sched/task_stack.h:12, from ./arch/riscv/include/asm/compat.h:12, from ./arch/riscv/include/asm/pgtable.h:115, from ./include/linux/pgtable.h:6, from ./include/linux/mm.h:30, from arch/riscv/kernel/asm-offsets.c:8: ./include/linux/kasan.h:50:37: error: ‘MAX_PTRS_PER_PTE’ undeclared here (not in a function); did you mean ‘PTRS_PER_PTE’? 50 | extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]; | ^~~~~~~~~~~~~~~~ | PTRS_PER_PTE ./include/linux/kasan.h:51:8: error: unknown type name ‘pmd_t’; did you mean ‘pgd_t’? 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; | ^~~~~ | pgd_t ./include/linux/kasan.h:51:37: error: ‘MAX_PTRS_PER_PMD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’? 51 | extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD]; | ^~~~~~~~~~~~~~~~ | PTRS_PER_PGD ./include/linux/kasan.h:52:8: error: unknown type name ‘pud_t’; did you mean ‘pgd_t’? 52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; | ^~~~~ | pgd_t ./include/linux/kasan.h:52:37: error: ‘MAX_PTRS_PER_PUD’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’? 
52 | extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD]; | ^~~~~~~~~~~~~~~~ | PTRS_PER_PGD ./include/linux/kasan.h:53:8: error: unknown type name ‘p4d_t’; did you mean ‘pgd_t’? 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; | ^~~~~ | pgd_t ./include/linux/kasan.h:53:37: error: ‘MAX_PTRS_PER_P4D’ undeclared here (not in a function); did you mean ‘PTRS_PER_PGD’? 53 | extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; | ^~~~~~~~~~~~~~~~ | PTRS_PER_PGD Link: https://lore.kernel.org/r/20241126143250.29708-1-palmer@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/compat.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index aa103530a5c8..6081327e55f5 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -9,7 +9,6 @@ */ #include #include -#include #include static inline int is_compat_task(void)