arm64/mm: Drop configurable 48-bit physical address space limit

Currently, the maximum supported physical address space can be
configured as either 48 bits or 52 bits. The only remaining difference
between these in practice is that the former omits the masking and
shifting required to construct TTBR and PTE values, which carry bits #48
and higher disjoint from the rest of the physical address.

The overhead of performing these additional calculations is negligible,
so there is little reason to retain support for two different
configurations; we can simply support whatever the hardware supports.

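As an illustration of the masking and shifting in question, here is a minimal
user-space sketch of the TTBR conversion used throughout this patch. The
formula and the GENMASK_ULL(47, 2) mask are taken from phys_to_ttbr() and
TTBR_BADDR_MASK_52 in the hunks below; the local GENMASK_ULL stand-in, the
function wrapper and the example address are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's GENMASK_ULL(h, l) */
    #define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)

    /* mirrors phys_to_ttbr(): PA bits 51:48 are folded down into TTBR bits 5:2 */
    static uint64_t phys_to_ttbr(uint64_t pa)
    {
    	return (pa | (pa >> 46)) & TTBR_BADDR_MASK_52;
    }

    int main(void)
    {
    	uint64_t pa = 0x000f000040001000ULL;	/* physical address above 48 bits */

    	printf("ttbr = %#llx\n", (unsigned long long)phys_to_ttbr(pa));
    	return 0;
    }

On hardware that only implements 48 bits of physical address, the folded bits
are simply zero, which is why doing this unconditionally costs next to nothing.
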
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241212081841.2168124-14-ardb+git@google.com
Signed-off-by: Will Deacon <will@kernel.org>
---
 12 files changed, 14 insertions(+), 81 deletions(-)


@@ -1416,38 +1416,9 @@ config ARM64_VA_BITS
 	default 48 if ARM64_VA_BITS_48
 	default 52 if ARM64_VA_BITS_52
 
-choice
-	prompt "Physical address space size"
-	default ARM64_PA_BITS_48
-	help
-	  Choose the maximum physical address range that the kernel will
-	  support.
-
-config ARM64_PA_BITS_48
-	bool "48-bit"
-	depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
-
-config ARM64_PA_BITS_52
-	bool "52-bit"
-	depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
-	help
-	  Enable support for a 52-bit physical address space, introduced as
-	  part of the ARMv8.2-LPA extension.
-
-	  With this enabled, the kernel will also continue to work on CPUs that
-	  do not support ARMv8.2-LPA, but with some added memory overhead (and
-	  minor performance overhead).
-
-endchoice
-
-config ARM64_PA_BITS
-	int
-	default 48 if ARM64_PA_BITS_48
-	default 52 if ARM64_PA_BITS_52
-
 config ARM64_LPA2
 	def_bool y
-	depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+	depends on !ARM64_64K_PAGES
 
 choice
 	prompt "Endianness"


@@ -342,14 +342,13 @@ alternative_cb_end
 
 	mrs	\tmp0, ID_AA64MMFR0_EL1
 	// Narrow PARange to fit the PS field in TCR_ELx
 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
-	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
 #ifdef CONFIG_ARM64_LPA2
 alternative_if_not ARM64_HAS_VA52
 	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
-alternative_else_nop_endif
-#endif
 	cmp	\tmp0, \tmp1
 	csel	\tmp0, \tmp1, \tmp0, hi
+alternative_else_nop_endif
+#endif
 	bfi	\tcr, \tmp0, \pos, #3
 .endm
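
In C terms, the clamping this macro performs now looks roughly like the sketch
below: the CPU's reported PARange is used as-is, except on LPA2-capable kernel
builds running on hardware without the 52-bit capability, where it is limited
to the 48-bit encoding before being programmed into TCR_ELx. The PARange
encoding constants follow the Arm ARM; the function and flag names are made up
for the example:

    #include <stdbool.h>
    #include <stdint.h>

    #define PARANGE_48	0x5	/* ID_AA64MMFR0_EL1.PARange encoding for 48 bits */
    #define PARANGE_52	0x6	/* ID_AA64MMFR0_EL1.PARange encoding for 52 bits */

    /* rough equivalent of tcr_compute_pa_size after this change */
    static uint64_t compute_tcr_ps(uint64_t cpu_parange, bool lpa2_build_without_va52)
    {
    	/* only clamp when the LPA2 page-table format cannot actually be used */
    	if (lpa2_build_without_va52 && cpu_parange > PARANGE_48)
    		cpu_parange = PARANGE_48;

    	return cpu_parange;	/* value for the TCR_ELx PS/IPS field */
    }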
@@ -599,21 +598,13 @@ alternative_endif
  * ttbr: returns the TTBR value
  */
 	.macro	phys_to_ttbr, ttbr, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
 	orr	\ttbr, \phys, \phys, lsr #46
 	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
-#else
-	mov	\ttbr, \phys
-#endif
 	.endm
 
 	.macro	phys_to_pte, pte, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
 	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
 	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
-#else
-	mov	\pte, \phys
-#endif
 	.endm
 
 /*


@@ -884,9 +884,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 	 * However, by the "D10.1.4 Principles of the ID scheme
 	 * for fields in ID registers", ARM DDI 0487C.a, any new
 	 * value is guaranteed to be higher than what we know already.
-	 * As a safe limit, we return the limit supported by the kernel.
 	 */
-	default: return CONFIG_ARM64_PA_BITS;
+	default: return 52;
 	}
 }

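The function being patched translates the ID_AA64MMFR0_EL1.PARange field into a
physical address width; with the Kconfig limit gone, an unrecognised (future)
encoding now yields 52 rather than the build-time CONFIG_ARM64_PA_BITS. A
self-contained sketch of that mapping, using the architectural PARange
encodings (the function name is local to the example):

    #include <stdint.h>

    /* ID_AA64MMFR0_EL1.PARange encodings, per the Arm ARM */
    static uint32_t parange_to_phys_shift(unsigned int parange)
    {
    	switch (parange) {
    	case 0x0: return 32;
    	case 0x1: return 36;
    	case 0x2: return 40;
    	case 0x3: return 42;
    	case 0x4: return 44;
    	case 0x5: return 48;
    	case 0x6: return 52;
    	/*
    	 * Newer encodings are guaranteed to describe a larger range, so
    	 * fall back to the largest width the kernel itself can handle.
    	 */
    	default: return 52;
    	}
    }
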

@@ -30,8 +30,7 @@
 
 static inline u64 kvm_get_parange_max(void)
 {
-	if (kvm_lpa2_is_enabled() ||
-	    (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+	if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
 		return ID_AA64MMFR0_EL1_PARANGE_52;
 	else
 		return ID_AA64MMFR0_EL1_PARANGE_48;


@@ -176,7 +176,6 @@
 #define PTE_SWBITS_MASK		_AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
 
 #define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
-#ifdef CONFIG_ARM64_PA_BITS_52
 #ifdef CONFIG_ARM64_64K_PAGES
 #define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
 #define PTE_ADDR_HIGH_SHIFT	36
@@ -186,7 +185,6 @@
 #define PTE_ADDR_HIGH_SHIFT	42
 #define PHYS_TO_PTE_ADDR_MASK	GENMASK_ULL(49, 8)
 #endif
-#endif
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -327,12 +325,10 @@
 /*
  * TTBR.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 /*
- * TTBR_ELx[1] is RES0 in this configuration.
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
  */
 #define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)
-#endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */

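Putting the 64K-page constants above together: physical address bits 51:48 are
stored in PTE bits 15:12, i.e. shifted down by 36, while the LPA2 layout in the
second hunk instead uses a shift of 42 to place bits 51:50 in PTE bits 9:8. A
tiny self-check using only the 64K values visible here (the example program is
purely illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* 64K-page values from above: PA[51:48] live in PTE[15:12] */
    #define PTE_ADDR_HIGH		(0xfULL << 12)
    #define PTE_ADDR_HIGH_SHIFT	36

    int main(void)
    {
    	uint64_t pa   = 0x000e00000bad0000ULL;	/* PA with bits 51:48 = 0xe */
    	uint64_t high = (pa >> PTE_ADDR_HIGH_SHIFT) & PTE_ADDR_HIGH;

    	assert(high == (0xeULL << 12));		/* lands in PTE bits 15:12 */
    	assert((high << PTE_ADDR_HIGH_SHIFT) == (pa & (0xfULL << 48)));	/* round-trips */
    	return 0;
    }
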

@@ -81,7 +81,7 @@ extern unsigned long prot_ns_shared;
 #define lpa2_is_enabled()	false
 #define PTE_MAYBE_SHARED	PTE_SHARED
 #define PMD_MAYBE_SHARED	PMD_SECT_S
-#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
+#define PHYS_MASK_SHIFT		(52)
 #else
 static inline bool __pure lpa2_is_enabled(void)
 {
@@ -90,7 +90,7 @@ static inline bool __pure lpa2_is_enabled(void)
 
 #define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
 #define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
-#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
+#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? 52 : 48)
 #endif
 
 /*


@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
 
 /*
- * Macros to convert between a physical address and its placement in a
+ * Helpers to convert between a physical address and its placement in a
  * page table entry, taking care of 52-bit addresses.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 static inline phys_addr_t __pte_to_phys(pte_t pte)
 {
 	pte_val(pte) &= ~PTE_MAYBE_SHARED;
@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 {
 	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
 }
-#else
-#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys)	(phys)
-#endif
 
 #define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn,prot)	\
@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
-#else
-#define phys_to_ttbr(addr)	(addr)
-#endif
 
 /*
  * On arm64 without hardware Access Flag, copying from user will fail because


@@ -916,12 +916,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2	0x3
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX	0x7
 
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_LPA2		ID_AA64MMFR0_EL1_TGRAN4_52_BIT


@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 void __init pgtable_cache_init(void)
 {
+	unsigned int pgd_size = PGD_SIZE;
+
 	if (pgdir_is_page_size())
 		return;
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 	/*
 	 * With 52-bit physical addresses, the architecture requires the
 	 * top-level table to be aligned to at least 64 bytes.
 	 */
-	BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+	if (PHYS_MASK_SHIFT >= 52)
+		pgd_size = max(pgd_size, 64);
 
 	/*
 	 * Naturally aligned pgds required by the architecture.
 	 */
-	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+	pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
 				      SLAB_PANIC, NULL);
 }

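The 64-byte alignment requirement comes from the TTBR encoding: with 52-bit
physical addressing, bits [5:2] of TTBR_ELx hold PA[51:48] of the table (that
is what the "| (pa >> 46)" folding elsewhere in this patch implements), so the
low six bits of the pgd's own physical address must be zero. Sizing the
kmem_cache at a minimum of 64 bytes guarantees that, because the cache aligns
objects to the object size here. A hedged sketch of the new sizing logic
(names are illustrative):

    /* illustrative only: mirrors the PHYS_MASK_SHIFT >= 52 check above */
    static unsigned int pgd_cache_size(unsigned int pgd_size, unsigned int phys_mask_shift)
    {
    	if (phys_mask_shift >= 52 && pgd_size < 64)
    		pgd_size = 64;	/* round size (and hence alignment) up to 64 bytes */
    	return pgd_size;
    }
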

@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
 
 	.macro	pte_to_phys, phys, pte
 	and	\phys, \pte, #PTE_ADDR_LOW
-#ifdef CONFIG_ARM64_PA_BITS_52
 	and	\pte, \pte, #PTE_ADDR_HIGH
 	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
-#endif
 	.endm
 
 	.macro kpti_mk_tbl_ng, type, num_entries

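For reference, the pte_to_phys macro above is the inverse of
__phys_to_pte_val(): the low address bits are taken directly from the PTE and
the high field is shifted back up into place. A C rendering, assuming the
64K-page constants (PAGE_SHIFT == 16) shown earlier in this patch:

    #include <stdint.h>

    #define PTE_ADDR_LOW		(((1ULL << (50 - 16)) - 1) << 16)	/* PTE bits 49:16 */
    #define PTE_ADDR_HIGH		(0xfULL << 12)			/* PA[51:48] in PTE[15:12] */
    #define PTE_ADDR_HIGH_SHIFT	36

    /* C version of the pte_to_phys assembler macro */
    static uint64_t pte_to_phys(uint64_t pte)
    {
    	return (pte & PTE_ADDR_LOW) |
    	       ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
    }
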

@@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
 LX_CONFIG(CONFIG_ARM64_16K_PAGES)
 LX_CONFIG(CONFIG_ARM64_64K_PAGES)
 if IS_BUILTIN(CONFIG_ARM64):
-    LX_VALUE(CONFIG_ARM64_PA_BITS)
     LX_VALUE(CONFIG_ARM64_VA_BITS)
     LX_VALUE(CONFIG_PAGE_SHIFT)
     LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)


@@ -574,12 +574,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN	0x2
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX	0x7
 
-#ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX	ID_AA64MMFR0_EL1_PARANGE_48
-#endif
-
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN