From 5f882f4aa8aa893f6c7dee11bbe57b0910a6d995 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 2 Dec 2024 10:05:53 +0530 Subject: [PATCH 01/13] arm64/mm: Drop INIT_MM_CONTEXT() The platform override for INIT_MM_CONTEXT() is redundant because swapper_pg_dir always gets assigned as the pgd during init_mm initialization. So just drop this override on arm64. Originally this override was added in commit 2b5548b68199 ("arm64/mm: Separate boot-time page tables from swapper_pg_dir") because the non-standard init_pg_dir was assigned as the pgd. Subsequently the pgd was changed back to the default swapper_pg_dir in commit ba5b0333a847 ("arm64: mm: omit redundant remap of kernel image"), which could have dropped this override as well. Cc: Catalin Marinas Cc: Will Deacon Cc: Ard Biesheuvel Cc: Ryan Roberts Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Acked-by: Ard Biesheuvel Reviewed-by: Ryan Roberts Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20241202043553.29592-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 2ec96d91acc6..662471cfc536 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -109,8 +109,5 @@ static inline bool kaslr_requires_kpti(void) return true; } -#define INIT_MM_CONTEXT(name) \ - .pgd = swapper_pg_dir, - #endif /* !__ASSEMBLY__ */ #endif From a0e33f528e09ae45308880999dee3f54b52e3182 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 2 Dec 2024 14:08:50 +0530 Subject: [PATCH 02/13] arm64/mm: Replace open encodings with PXD_TABLE_BIT The [pgd|p4d]_bad() helpers have open encodings for their respective table bits, which can be replaced with the corresponding macros. This makes the checks clearer and more readable.
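[Editorial note, not part of PATCH 02: the magic number being replaced is bit 1 of a VMSAv8-64 translation table descriptor, which distinguishes a table descriptor from a block descriptor. A minimal sketch of the check the macros now spell out; the helper name is made up for illustration:]

	/*
	 * Bit 0 marks a descriptor as valid; bit 1 (the *_TABLE_BIT
	 * macros) marks it as a table rather than a block descriptor.
	 * p4d_bad() flags present entries whose table bit is clear.
	 */
	static inline bool p4d_points_to_table(p4d_t p4d)
	{
		return (p4d_val(p4d) & P4D_TABLE_BIT) != 0;
	}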
Cc: Catalin Marinas Cc: Will Deacon Cc: Ard Biesheuvel Cc: Ryan Roberts Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Ryan Roberts Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20241202083850.73207-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6986345b537a..e20b80229910 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -896,7 +896,7 @@ static inline bool mm_pud_folded(const struct mm_struct *mm) pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e)) #define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d)) -#define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & 2)) +#define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT)) #define p4d_present(p4d) (!p4d_none(p4d)) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) @@ -1023,7 +1023,7 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm) pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e)) #define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd)) -#define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & 2)) +#define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT)) #define pgd_present(pgd) (!pgd_none(pgd)) static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) From 1e5823c8e86de83a43d59a522b4de29066d3b306 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 2 Dec 2024 12:14:07 +0530 Subject: [PATCH 03/13] arm64/mm: Ensure adequate HUGE_MAX_HSTATE This asserts that HUGE_MAX_HSTATE is large enough, preventing a potential runtime overflow of hugetlb_max_hstate in hugetlb_add_hstate() that would trigger a BUG_ON() thereafter. Cc: Catalin Marinas Cc: Will Deacon Cc: Ard Biesheuvel Cc: Ryan Roberts Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Ryan Roberts Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20241202064407.53807-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/hugetlbpage.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 3215adf48a1b..98a2a0e64e25 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -519,6 +519,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, static int __init hugetlbpage_init(void) { + /* + * HugeTLB pages are supported on maximum four page table + * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base + * page size, corresponding to hugetlb_add_hstate() calls + * here. + * + * HUGE_MAX_HSTATE should at least match maximum supported + * HugeTLB page sizes on the platform. Any new addition to + * supported HugeTLB page sizes will also require changing + * HUGE_MAX_HSTATE as well. + */ + BUILD_BUG_ON(HUGE_MAX_HSTATE < 4); if (pud_sect_supported()) hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); From e281bd22998b4ff8e4add6a5cea2db980bf64d8d Mon Sep 17 00:00:00 2001 From: Zhu Jun Date: Tue, 3 Dec 2024 01:33:23 -0800 Subject: [PATCH 04/13] arm64: asm: Fix typo in pgtable.h The word 'trasferring' is wrong, so fix it.
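[Editorial note, not part of PATCH 03 above: the constant 4 in the new BUILD_BUG_ON() corresponds to the four hstates that hugetlbpage_init() can register. A sketch for a 4K base page configuration; the sizes in the comments are assumptions for illustration, and the exact set depends on the granule and on pud_sect_supported():]

	/* At most four hugetlb_add_hstate() calls, one per supported level */
	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);      /* 1G,  PUD      */
	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT); /* 32M, cont PMD */
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);      /* 2M,  PMD      */
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT); /* 64K, cont PTE */

Each call increments hugetlb_max_hstate, so a fifth supported size would overflow the hstates[HUGE_MAX_HSTATE] array unless the constant grows with it.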
Signed-off-by: Zhu Jun Link: https://lore.kernel.org/r/20241203093323.7831-1-zhujun2@cmss.chinamobile.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index e20b80229910..f8dac6673887 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1345,7 +1345,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm, } /* - * __ptep_set_wrprotect - mark read-only while trasferring potential hardware + * __ptep_set_wrprotect - mark read-only while transferring potential hardware * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. */ static inline void __ptep_set_wrprotect(struct mm_struct *mm, From 9456a15947c1fe875e75d790a8ecd0b659fd2915 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 10 Dec 2024 10:02:57 +0530 Subject: [PATCH 05/13] arm64/Kconfig: Drop EXECMEM dependency from ARCH_WANTS_EXECMEM_LATE ARCH_WANTS_EXECMEM_LATE indicates a subscribing platform's preference for late EXECMEM initialisation and does not need EXECMEM as a dependency. Hence just drop the EXECMEM dependency when selecting ARCH_WANTS_EXECMEM_LATE. Cc: Catalin Marinas Cc: Will Deacon Cc: Andrew Morton Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mm@kvack.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20241210043257.715822-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 100570a048c5..05817b62df6d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -113,7 +113,7 @@ config ARM64 select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36) select ARCH_WANT_LD_ORPHAN_WARN - select ARCH_WANTS_EXECMEM_LATE if EXECMEM + select ARCH_WANTS_EXECMEM_LATE select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES select ARCH_HAS_UBSAN From 3e5be4e11aac40eb9d3ea6b5e79b7e95b0a6ebe5 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 11 Dec 2024 12:24:24 +0530 Subject: [PATCH 06/13] docs: arm64: Document EL3 requirements for cpu debug architecture This documents the EL3 requirements for the CPU debug architecture. The MDCR_EL3.TDA register field needs to be cleared so that accesses to debug registers do not generate a trap into EL3. CPU debug registers like DBGBCR_EL1, DBGBVR_EL1, DBGWCR_EL1, DBGWVR_EL1 and MDSCR_EL1 are already being accessed for HW breakpoint, watchpoint and debug monitor implementations on the platform. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: Jonathan Corbet Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20241211065425.1106683-2-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- Documentation/arch/arm64/booting.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index 3278fb4bf219..1b3ac1394e5f 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -449,6 +449,12 @@ Before jumping into the kernel, the following conditions must be met: - HFGWTR_EL2.nGCS_EL0 (bit 52) must be initialised to 0b1.
+ - For CPUs with debug architecture, i.e. FEAT_Debugv8pN (all versions): + + - If EL3 is present: + + - MDCR_EL3.TDA (bit 9) must be initialized to 0b0 + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. Where the values documented From 1e4a5e3679cc4d037982bfc822d939d5ba954e70 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Wed, 11 Dec 2024 12:24:25 +0530 Subject: [PATCH 07/13] docs: arm64: Document EL3 requirements for FEAT_PMUv3 This documents the EL3 requirements for FEAT_PMUv3. The MDCR_EL3.TPM register field needs to be cleared so that accesses to PMU registers do not generate a trap into EL3. PMUv3 registers like PMCCFILTR_EL0, PMCCNTR_EL0, PMCNTENCLR_EL0, PMCNTENSET_EL0, PMCR_EL0, PMEVCNTR_EL0, PMEVTYPER_EL0 etc. are already being accessed by the perf HW PMU implementation. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: Jonathan Corbet Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/20241211065425.1106683-3-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- Documentation/arch/arm64/booting.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst index 1b3ac1394e5f..cad6fdc96b98 100644 --- a/Documentation/arch/arm64/booting.rst +++ b/Documentation/arch/arm64/booting.rst @@ -455,6 +455,12 @@ Before jumping into the kernel, the following conditions must be met: - MDCR_EL3.TDA (bit 9) must be initialized to 0b0 + - For CPUs with FEAT_PMUv3: + + - If EL3 is present: + + - MDCR_EL3.TPM (bit 6) must be initialized to 0b0 + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level. Where the values documented From bf74bb73cd87c64bd5afc1fd4b749029997b6170 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:43 +0100 Subject: [PATCH 08/13] arm64/mm: Reduce PA space to 48 bits when LPA2 is not enabled Currently, LPA2 kernel support implies support for up to 52 bits of physical addressing, and this is reflected in global definitions such as PHYS_MASK_SHIFT and MAX_PHYSMEM_BITS. This is potentially problematic, given that LPA2 hardware support is modeled as a CPU feature which can be overridden, and with LPA2 hardware support turned off, attempting to map physical regions with address bits [51:48] set (which may exist on LPA2 capable systems booting with arm64.nolva) will result in corrupted mappings with a truncated output address and bogus shareability attributes. This means that the accepted physical address range in the mapping routines should be at most 48 bits wide when LPA2 support is configured but not enabled at runtime.
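[Editorial note, not part of PATCH 08: the corruption arises because the LPA2 descriptor encoding folds PA bits [51:50] into PTE bits [9:8], which non-LPA2 hardware interprets as the shareability field. A sketch of that folding, assuming the 4K/16K constants that appear later in this series (PTE_ADDR_HIGH_SHIFT of 42 and a GENMASK_ULL(49, 8) mask):]

	#include <linux/bits.h>
	#include <linux/types.h>

	/* LPA2 encoding: PA[49:12] -> PTE[49:12], PA[51:50] -> PTE[9:8] */
	static u64 lpa2_phys_to_pte(u64 phys)
	{
		return (phys | (phys >> 42)) & GENMASK_ULL(49, 8);
	}

With LPA2 compiled in but disabled at runtime, the folding still happens, so a physical address above 48 bits lands partly in what the hardware reads as shareability attributes while the remaining high bits are truncated, hence the 48-bit cap on PHYS_MASK_SHIFT in that case.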
Fixes: 352b0395b505 ("arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs") Cc: stable@vger.kernel.org Reviewed-by: Anshuman Khandual Signed-off-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-9-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable-hwdef.h | 6 ------ arch/arm64/include/asm/pgtable-prot.h | 7 +++++++ arch/arm64/include/asm/sparsemem.h | 5 ++++- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index c78a988cca93..a9136cc551cc 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -222,12 +222,6 @@ */ #define S1_TABLE_AP (_AT(pmdval_t, 3) << 61) -/* - * Highest possible physical address supported. - */ -#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS) -#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1) - #define TTBR_CNP_BIT (UL(1) << 0) /* diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 9f9cf13bbd95..a95f1f77bb39 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared; #define lpa2_is_enabled() false #define PTE_MAYBE_SHARED PTE_SHARED #define PMD_MAYBE_SHARED PMD_SECT_S +#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS) #else static inline bool __pure lpa2_is_enabled(void) { @@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void) #define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED) #define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S) +#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48) #endif +/* + * Highest possible physical address supported. + */ +#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1) + /* * If we have userspace only BTI we don't want to mark kernel pages * guarded even if the system does support BTI. diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 8a8acc220371..84783efdc9d1 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -5,7 +5,10 @@ #ifndef __ASM_SPARSEMEM_H #define __ASM_SPARSEMEM_H -#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS +#include <asm/pgtable-prot.h> + +#define MAX_PHYSMEM_BITS PHYS_MASK_SHIFT +#define MAX_POSSIBLE_PHYSMEM_BITS (52) /* * Section size must be at least 512MB for 64K base From 62cffa496aac0c2c4eeca00d080058affd7a0172 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:44 +0100 Subject: [PATCH 09/13] arm64/mm: Override PARange for !LPA2 and use it consistently When FEAT_LPA{,2} are not implemented, the ID_AA64MMFR0_EL1.PARange and TCR.IPS values corresponding with 52-bit physical addressing are reserved. Setting the TCR.IPS field to 0b110 (52-bit physical addressing) has side effects, such as how the TTBRn_ELx.BADDR fields are interpreted, and so it is important that disabling FEAT_LPA2 (by overriding the ID_AA64MMFR0.TGran fields) also presents a PARange field consistent with that. So limit the field to 48 bits unless LPA2 is enabled, and update existing references to use the override consistently.
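[Editorial note, not part of PATCH 09: the recurring pattern in this patch is "read PARange, but never use the 52-bit encoding unless LPA2 is actually enabled". A sketch under that assumption; the helper is hypothetical, and the field encodings are 0b0101 for 48-bit and 0b0110 for 52-bit:]

	static u64 effective_parange(u64 mmfr0)
	{
		u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_PARANGE_SHIFT);

		/* The 52-bit IPS/PS encoding is reserved without LPA2 */
		if (!lpa2_is_enabled())
			parange = min(parange, (u64)ID_AA64MMFR0_EL1_PARANGE_48);
		return parange;
	}

TCR_ELx.IPS/PS can then be programmed from such a value directly, which is roughly what the idreg-override and map_kernel.c hunks below achieve by overriding the ID register itself.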
Fixes: 352b0395b505 ("arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs") Cc: stable@vger.kernel.org Signed-off-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-10-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 5 +++++ arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/pi/idreg-override.c | 9 +++++++++ arch/arm64/kernel/pi/map_kernel.c | 6 ++++++ arch/arm64/mm/init.c | 7 ++++++- 5 files changed, 27 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 3d8d534a7a77..ad63457a05c5 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -343,6 +343,11 @@ alternative_cb_end // Narrow PARange to fit the PS field in TCR_ELx ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3 mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX +#ifdef CONFIG_ARM64_LPA2 +alternative_if_not ARM64_HAS_VA52 + mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48 +alternative_else_nop_endif +#endif cmp \tmp0, \tmp1 csel \tmp0, \tmp1, \tmp0, hi bfi \tcr, \tmp0, \pos, #3 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 6ce71f444ed8..f8cb8a6ab98a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -3478,7 +3478,7 @@ static void verify_hyp_capabilities(void) return; safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); - mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); /* Verify VMID bits */ diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c index 22159251eb3a..c6b185b885f7 100644 --- a/arch/arm64/kernel/pi/idreg-override.c +++ b/arch/arm64/kernel/pi/idreg-override.c @@ -83,6 +83,15 @@ static bool __init mmfr2_varange_filter(u64 val) id_aa64mmfr0_override.val |= (ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT; id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT; + + /* + * Override PARange to 48 bits - the override will just be + * ignored if the actual PARange is smaller, but this is + * unlikely to be the case for LPA2 capable silicon. + */ + id_aa64mmfr0_override.val |= + ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT; + id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT; } #endif return true; diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c index f374a3e5a5fe..e57b043f324b 100644 --- a/arch/arm64/kernel/pi/map_kernel.c +++ b/arch/arm64/kernel/pi/map_kernel.c @@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr) { u64 sctlr = read_sysreg(sctlr_el1); u64 tcr = read_sysreg(tcr_el1) | TCR_DS; + u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1); + u64 parange = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + + tcr &= ~TCR_IPS_MASK; + tcr |= parange << TCR_IPS_SHIFT; asm(" msr sctlr_el1, %0 ;" " isb ;" diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index ccdef53872a0..9c0b8d9558fc 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -279,7 +279,12 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + + /* + * Use the sanitised version of id_aa64mmfr0_el1 so that linear + * map randomization can be enabled by shrinking the IPA space. 
+ */ + u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); int parange = cpuid_feature_extract_unsigned_field( mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); s64 range = linear_region_size - From f0da16992aef7e246b2f3bba1492e3a52c38ca0e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:45 +0100 Subject: [PATCH 10/13] arm64/kvm: Configure HYP TCR.PS/DS based on host stage1 When the host stage1 is configured for LPA2, the value currently being programmed into TCR_EL2.T0SZ may be invalid unless LPA2 is configured at HYP as well. This means kvm_lpa2_is_enabled() is not the right condition to test when setting TCR_EL2.DS, as it will return false if LPA2 is only available for stage 1 but not for stage 2. Similarly, programming TCR_EL2.PS based on a limited IPA range due to lack of stage2 LPA2 support could potentially result in problems. So use lpa2_is_enabled() instead, and set the PS field according to the host's IPS, which is capped at 48 bits if LPA2 support is absent or disabled. Whether or not we can make meaningful use of such a configuration is a different question. Cc: stable@vger.kernel.org Signed-off-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-11-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/kvm/arm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a102c3aebdbc..7b2735ad32e9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1990,8 +1990,7 @@ static int kvm_init_vector_slots(void) static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) { struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); - u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - unsigned long tcr; + unsigned long tcr, ips; /* * Calculate the raw per-cpu offset without a translation from the @@ -2005,6 +2004,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) params->mair_el2 = read_sysreg(mair_el1); tcr = read_sysreg(tcr_el1); + ips = FIELD_GET(TCR_IPS_MASK, tcr); if (cpus_have_final_cap(ARM64_KVM_HVHE)) { tcr |= TCR_EPD1_MASK; } else { @@ -2014,8 +2014,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) tcr &= ~TCR_T0SZ_MASK; tcr |= TCR_T0SZ(hyp_va_bits); tcr &= ~TCR_EL2_PS_MASK; - tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0)); - if (kvm_lpa2_is_enabled()) + tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips); + if (lpa2_is_enabled()) tcr |= TCR_EL2_DS; params->tcr_el2 = tcr; From 9d86c3c974348eb4220b637fae1a2466232078b7 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:46 +0100 Subject: [PATCH 11/13] arm64/kvm: Avoid invalid physical addresses to signal owner updates The pKVM stage2 mapping code relies on an invalid physical address to signal to the internal API that only the annotations of descriptors should be updated, and these are stored in the high bits of invalid descriptors covering memory that has been donated to protected guests, and is therefore unmapped from the host stage-2 page tables. Given that these invalid PAs are never stored into the descriptors, it is better to rely on an explicit flag, to clarify the API and to avoid confusion regarding whether or not the output address of a descriptor can ever be invalid to begin with (which is not the case with LPA2).
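[Editorial note, not part of PATCH 11: the shape of the API change, reduced to a sketch with fields abridged from the stage2_map_data diff below:]

	struct stage2_map_data {
		u64	phys;		/* ignored for annotation walks */
		u8	owner_id;
		bool	force_pte;
		bool	annotation;	/* update owner annotations only */
	};

Callers that previously passed KVM_PHYS_INVALID as .phys now set .annotation = true instead, so the walker no longer needs kvm_phys_is_valid() to tell the two kinds of walk apart.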
That removes a dependency on the logic that reasons about the maximum PA range, which differs on LPA2 capable CPUs based on whether LPA2 is enabled or not, and will be further clarified in subsequent patches. Cc: Quentin Perret Signed-off-by: Ard Biesheuvel Reviewed-by: Quentin Perret Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-12-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/kvm/hyp/pgtable.c | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 40bd55966540..d2b6fa051d6b 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -35,14 +35,6 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx) return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO); } -static bool kvm_phys_is_valid(u64 phys) -{ - u64 parange_max = kvm_get_parange_max(); - u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max); - - return phys < BIT(shift); -} - static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys) { u64 granule = kvm_granule_size(ctx->level); @@ -53,7 +45,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, if (granule > (ctx->end - ctx->addr)) return false; - if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) + if (!IS_ALIGNED(phys, granule)) return false; return IS_ALIGNED(ctx->addr, granule); @@ -587,6 +579,9 @@ struct stage2_map_data { /* Force mappings to page granularity */ bool force_pte; + + /* Walk should update owner_id only */ + bool annotation; }; u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) @@ -885,18 +880,7 @@ static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx, { u64 phys = data->phys; - /* - * Stage-2 walks to update ownership data are communicated to the map - * walker using an invalid PA. Avoid offsetting an already invalid PA, - * which could overflow and make the address valid again. - */ - if (!kvm_phys_is_valid(phys)) - return phys; - - /* - * Otherwise, work out the correct PA based on how far the walk has - * gotten. 
- */ + /* Work out the correct PA based on how far the walk has gotten */ return phys + (ctx->addr - ctx->start); } @@ -908,6 +892,9 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx, if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL) return false; + if (data->annotation) + return true; + return kvm_block_mapping_supported(ctx, phys); } @@ -923,7 +910,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, if (!stage2_leaf_mapping_allowed(ctx, data)) return -E2BIG; - if (kvm_phys_is_valid(phys)) + if (!data->annotation) new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level); else new = kvm_init_invalid_leaf_owner(data->owner_id); @@ -1085,11 +1072,11 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, { int ret; struct stage2_map_data map_data = { - .phys = KVM_PHYS_INVALID, .mmu = pgt->mmu, .memcache = mc, .owner_id = owner_id, .force_pte = true, + .annotation = true, }; struct kvm_pgtable_walker walker = { .cb = stage2_map_walker, From 92b6919d7fb29691a8bc5aca49044056683542ca Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:47 +0100 Subject: [PATCH 12/13] arm64: Kconfig: force ARM64_PAN=y when enabling TTBR0 sw PAN There are a couple of instances of Kconfig constraints where PAN must be enabled too if TTBR0 sw PAN is enabled, primarily to avoid dealing with the modified TTBR0_EL1 sysreg format that is used when 52-bit physical addressing and/or CnP are enabled (support for either implies support for hardware PAN as well, which will supersede PAN emulation if both are available). Let's simplify this, and always enable ARM64_PAN when enabling TTBR0 sw PAN. This decouples the PAN configuration from the VA size selection, permitting us to simplify the latter in subsequent patches. (Note that PAN and TTBR0 sw PAN can still be disabled after this patch, but not independently.) To avoid a convoluted circular Kconfig dependency involving KCSAN, make ARM64_MTE select ARM64_PAN too, instead of depending on it. Signed-off-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-13-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 100570a048c5..c1ca21adddc1 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1379,7 +1379,6 @@ config ARM64_VA_BITS_48 config ARM64_VA_BITS_52 bool "52-bit" - depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN help Enable 52-bit virtual addressing for userspace when explicitly requested via a hint to mmap(). The kernel will also use 52-bit @@ -1431,7 +1430,6 @@ config ARM64_PA_BITS_48 config ARM64_PA_BITS_52 bool "52-bit" depends on ARM64_64K_PAGES || ARM64_VA_BITS_52 - depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN help Enable support for a 52-bit physical address space, introduced as part of the ARMv8.2-LPA extension.
@@ -1681,6 +1679,7 @@ config RODATA_FULL_DEFAULT_ENABLED config ARM64_SW_TTBR0_PAN bool "Emulate Privileged Access Never using TTBR0_EL1 switching" depends on !KCSAN + select ARM64_PAN help Enabling this option prevents the kernel from accessing user-space memory directly by pointing TTBR0_EL1 to a reserved @@ -1937,7 +1936,6 @@ config ARM64_RAS_EXTN config ARM64_CNP bool "Enable support for Common Not Private (CNP) translations" default y - depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN help Common Not Private (CNP) allows translation table entries to be shared between different PEs in the same inner shareable @@ -2132,7 +2130,7 @@ config ARM64_MTE depends on AS_HAS_ARMV8_5 depends on AS_HAS_LSE_ATOMICS # Required for tag checking in the uaccess routines - depends on ARM64_PAN + select ARM64_PAN select ARCH_HAS_SUBPAGE_FAULTS select ARCH_USES_HIGH_VMA_FLAGS select ARCH_USES_PG_ARCH_2 From 32d053d6f5e92efd82349e7c481cba5a43dc1a22 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 12 Dec 2024 09:18:48 +0100 Subject: [PATCH 13/13] arm64/mm: Drop configurable 48-bit physical address space limit Currently, the maximum supported physical address space can be configured as either 48 bits or 52 bits. The only remaining difference between these in practice is that the former omits the masking and shifting required to construct TTBR and PTE values, which carry bits #48 and higher disjoint from the rest of the physical address. The overhead of performing these additional calculations is negligible, and so there is little reason to retain support for two different configurations, and we can simply support whatever the hardware supports. Signed-off-by: Ard Biesheuvel Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20241212081841.2168124-14-ardb+git@google.com Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 31 +------------------------- arch/arm64/include/asm/assembler.h | 13 ++--------- arch/arm64/include/asm/cpufeature.h | 3 +-- arch/arm64/include/asm/kvm_pgtable.h | 3 +-- arch/arm64/include/asm/pgtable-hwdef.h | 6 +---- arch/arm64/include/asm/pgtable-prot.h | 4 ++-- arch/arm64/include/asm/pgtable.h | 11 +-------- arch/arm64/include/asm/sysreg.h | 6 ----- arch/arm64/mm/pgd.c | 9 ++++---- arch/arm64/mm/proc.S | 2 -- scripts/gdb/linux/constants.py.in | 1 - tools/arch/arm64/include/asm/sysreg.h | 6 ----- 12 files changed, 14 insertions(+), 81 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c1ca21adddc1..7ebd0ba32a32 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1416,38 +1416,9 @@ config ARM64_VA_BITS default 48 if ARM64_VA_BITS_48 default 52 if ARM64_VA_BITS_52 -choice - prompt "Physical address space size" - default ARM64_PA_BITS_48 - help - Choose the maximum physical address range that the kernel will - support. - -config ARM64_PA_BITS_48 - bool "48-bit" - depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52 - -config ARM64_PA_BITS_52 - bool "52-bit" - depends on ARM64_64K_PAGES || ARM64_VA_BITS_52 - help - Enable support for a 52-bit physical address space, introduced as - part of the ARMv8.2-LPA extension. - - With this enabled, the kernel will also continue to work on CPUs that - do not support ARMv8.2-LPA, but with some added memory overhead (and - minor performance overhead). 
- -endchoice - -config ARM64_PA_BITS - int - default 48 if ARM64_PA_BITS_48 - default 52 if ARM64_PA_BITS_52 - config ARM64_LPA2 def_bool y - depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES + depends on !ARM64_64K_PAGES choice prompt "Endianness" diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ad63457a05c5..01a1e3c16283 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -342,14 +342,13 @@ alternative_cb_end mrs \tmp0, ID_AA64MMFR0_EL1 // Narrow PARange to fit the PS field in TCR_ELx ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3 - mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX #ifdef CONFIG_ARM64_LPA2 alternative_if_not ARM64_HAS_VA52 mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48 -alternative_else_nop_endif -#endif cmp \tmp0, \tmp1 csel \tmp0, \tmp1, \tmp0, hi +alternative_else_nop_endif +#endif bfi \tcr, \tmp0, \pos, #3 .endm @@ -599,21 +598,13 @@ alternative_endif * ttbr: returns the TTBR value */ .macro phys_to_ttbr, ttbr, phys -#ifdef CONFIG_ARM64_PA_BITS_52 orr \ttbr, \phys, \phys, lsr #46 and \ttbr, \ttbr, #TTBR_BADDR_MASK_52 -#else - mov \ttbr, \phys -#endif .endm .macro phys_to_pte, pte, phys -#ifdef CONFIG_ARM64_PA_BITS_52 orr \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT and \pte, \pte, #PHYS_TO_PTE_ADDR_MASK -#else - mov \pte, \phys -#endif .endm /* diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8b4e5a3cd24c..8f9f7bd4d482 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -884,9 +884,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) * However, by the "D10.1.4 Principles of the ID scheme * for fields in ID registers", ARM DDI 0487C.a, any new * value is guaranteed to be higher than what we know already. - * As a safe limit, we return the limit supported by the kernel. */ - default: return CONFIG_ARM64_PA_BITS; + default: return 52; } } diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index aab04097b505..525aef178cb4 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -30,8 +30,7 @@ static inline u64 kvm_get_parange_max(void) { - if (kvm_lpa2_is_enabled() || - (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16)) + if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16) return ID_AA64MMFR0_EL1_PARANGE_52; else return ID_AA64MMFR0_EL1_PARANGE_48; diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index a9136cc551cc..9b34180042b2 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -176,7 +176,6 @@ #define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55))) #define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT) -#ifdef CONFIG_ARM64_PA_BITS_52 #ifdef CONFIG_ARM64_64K_PAGES #define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12) #define PTE_ADDR_HIGH_SHIFT 36 @@ -186,7 +185,6 @@ #define PTE_ADDR_HIGH_SHIFT 42 #define PHYS_TO_PTE_ADDR_MASK GENMASK_ULL(49, 8) #endif -#endif /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). @@ -327,12 +325,10 @@ /* * TTBR. */ -#ifdef CONFIG_ARM64_PA_BITS_52 /* - * TTBR_ELx[1] is RES0 in this configuration. 
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing */ #define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2) -#endif #ifdef CONFIG_ARM64_VA_BITS_52 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */ diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index a95f1f77bb39..b73acf25341f 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared; #define lpa2_is_enabled() false #define PTE_MAYBE_SHARED PTE_SHARED #define PMD_MAYBE_SHARED PMD_SECT_S -#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS) +#define PHYS_MASK_SHIFT (52) #else static inline bool __pure lpa2_is_enabled(void) { @@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void) #define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED) #define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S) -#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48) +#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? 52 : 48) #endif /* diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6986345b537a..ec8124d66b9c 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e)) /* - * Macros to convert between a physical address and its placement in a + * Helpers to convert between a physical address and its placement in a * page table entry, taking care of 52-bit addresses. */ -#ifdef CONFIG_ARM64_PA_BITS_52 static inline phys_addr_t __pte_to_phys(pte_t pte) { pte_val(pte) &= ~PTE_MAYBE_SHARED; @@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) { return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK; } -#else -#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW) -#define __phys_to_pte_val(phys) (phys) -#endif #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) #define pfn_pte(pfn,prot) \ @@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, update_mmu_cache_range(NULL, vma, addr, ptep, 1) #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) -#ifdef CONFIG_ARM64_PA_BITS_52 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) -#else -#define phys_to_ttbr(addr) (addr) -#endif /* * On arm64 without hardware Access Flag, copying from user will fail because diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b8303a83c0bf..f902893ec903 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -916,12 +916,6 @@ #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2 0x3 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 -#ifdef CONFIG_ARM64_PA_BITS_52 -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52 -#else -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 -#endif - #if defined(CONFIG_ARM64_4K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 0c501cabc238..8722ab6d4b1c 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) void __init pgtable_cache_init(void) { + unsigned int pgd_size = PGD_SIZE; + if (pgdir_is_page_size()) return; -#ifdef 
CONFIG_ARM64_PA_BITS_52 /* * With 52-bit physical addresses, the architecture requires the * top-level table to be aligned to at least 64 bytes. */ - BUILD_BUG_ON(PGD_SIZE < 64); -#endif + if (PHYS_MASK_SHIFT >= 52) + pgd_size = max(pgd_size, 64); /* * Naturally aligned pgds required by the architecture. */ - pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE, + pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size, SLAB_PANIC, NULL); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index b8edc5765441..51ed0e9d0a0d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1) .macro pte_to_phys, phys, pte and \phys, \pte, #PTE_ADDR_LOW -#ifdef CONFIG_ARM64_PA_BITS_52 and \pte, \pte, #PTE_ADDR_HIGH orr \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT -#endif .endm .macro kpti_mk_tbl_ng, type, num_entries diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index fd6bd69c5096..05034c0b8fd7 100644 --- a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES) LX_CONFIG(CONFIG_ARM64_16K_PAGES) LX_CONFIG(CONFIG_ARM64_64K_PAGES) if IS_BUILTIN(CONFIG_ARM64): - LX_VALUE(CONFIG_ARM64_PA_BITS) LX_VALUE(CONFIG_ARM64_VA_BITS) LX_VALUE(CONFIG_PAGE_SHIFT) LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER) diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index cd8420e8c3ad..daeecb1a5366 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -574,12 +574,6 @@ #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 -#ifdef CONFIG_ARM64_PA_BITS_52 -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52 -#else -#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 -#endif - #if defined(CONFIG_ARM64_4K_PAGES) #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN