mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git

commit d6ab634f1b
Merge branches 'for-next/cpufeature', 'for-next/docs', 'for-next/misc' and 'for-next/mm' into for-next/core
@@ -449,6 +449,18 @@ Before jumping into the kernel, the following conditions must be met:

    - HFGWTR_EL2.nGCS_EL0 (bit 52) must be initialised to 0b1.

  - For CPUs with debug architecture i.e FEAT_Debugv8pN (all versions):

    - If EL3 is present:

      - MDCR_EL3.TDA (bit 9) must be initialized to 0b0

  - For CPUs with FEAT_PMUv3:

    - If EL3 is present:

      - MDCR_EL3.TPM (bit 6) must be initialized to 0b0

  The requirements described above for CPU mode, caches, MMUs, architected
  timers, coherency and system registers apply to all CPUs. All CPUs must
  enter the kernel in the same exception level. Where the values documented
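The two MDCR_EL3 requirements added above are obligations on the boot firmware rather than on kernel code. As a rough illustration only (hypothetical helper, not part of this series; bit positions taken from the text above), EL3 firmware could satisfy them like this before dropping to the exception level the kernel is entered at:

static inline void mdcr_el3_prepare_for_kernel(void)
{
        unsigned long mdcr;

        __asm__ volatile("mrs %0, mdcr_el3" : "=r" (mdcr));
        mdcr &= ~(1UL << 9);    /* TDA = 0: don't trap debug register accesses */
        mdcr &= ~(1UL << 6);    /* TPM = 0: don't trap PMU register accesses */
        __asm__ volatile("msr mdcr_el3, %0" : : "r" (mdcr));
        __asm__ volatile("isb" ::: "memory");
}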
@@ -113,7 +113,7 @@ config ARM64
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_EXECMEM_LATE if EXECMEM
        select ARCH_WANTS_EXECMEM_LATE
        select ARCH_WANTS_NO_INSTR
        select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
        select ARCH_HAS_UBSAN
@@ -1379,7 +1379,6 @@ config ARM64_VA_BITS_48

config ARM64_VA_BITS_52
        bool "52-bit"
        depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
        help
          Enable 52-bit virtual addressing for userspace when explicitly
          requested via a hint to mmap(). The kernel will also use 52-bit
@@ -1417,39 +1416,9 @@ config ARM64_VA_BITS
        default 48 if ARM64_VA_BITS_48
        default 52 if ARM64_VA_BITS_52

choice
        prompt "Physical address space size"
        default ARM64_PA_BITS_48
        help
          Choose the maximum physical address range that the kernel will
          support.

config ARM64_PA_BITS_48
        bool "48-bit"
        depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52

config ARM64_PA_BITS_52
        bool "52-bit"
        depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
        depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
        help
          Enable support for a 52-bit physical address space, introduced as
          part of the ARMv8.2-LPA extension.

          With this enabled, the kernel will also continue to work on CPUs that
          do not support ARMv8.2-LPA, but with some added memory overhead (and
          minor performance overhead).

endchoice

config ARM64_PA_BITS
        int
        default 48 if ARM64_PA_BITS_48
        default 52 if ARM64_PA_BITS_52

config ARM64_LPA2
        def_bool y
        depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
        depends on !ARM64_64K_PAGES

choice
        prompt "Endianness"
@@ -1681,6 +1650,7 @@ config RODATA_FULL_DEFAULT_ENABLED

config ARM64_SW_TTBR0_PAN
        bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
        depends on !KCSAN
        select ARM64_PAN
        help
          Enabling this option prevents the kernel from accessing
          user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1937,7 +1907,6 @@ config ARM64_RAS_EXTN

config ARM64_CNP
        bool "Enable support for Common Not Private (CNP) translations"
        default y
        depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
        help
          Common Not Private (CNP) allows translation table entries to
          be shared between different PEs in the same inner shareable
@@ -2132,7 +2101,7 @@ config ARM64_MTE
        depends on AS_HAS_ARMV8_5
        depends on AS_HAS_LSE_ATOMICS
        # Required for tag checking in the uaccess routines
        depends on ARM64_PAN
        select ARM64_PAN
        select ARCH_HAS_SUBPAGE_FAULTS
        select ARCH_USES_HIGH_VMA_FLAGS
        select ARCH_USES_PG_ARCH_2
@ -342,9 +342,13 @@ alternative_cb_end
|
||||
mrs \tmp0, ID_AA64MMFR0_EL1
|
||||
// Narrow PARange to fit the PS field in TCR_ELx
|
||||
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
|
||||
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
|
||||
#ifdef CONFIG_ARM64_LPA2
|
||||
alternative_if_not ARM64_HAS_VA52
|
||||
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
|
||||
cmp \tmp0, \tmp1
|
||||
csel \tmp0, \tmp1, \tmp0, hi
|
||||
alternative_else_nop_endif
|
||||
#endif
|
||||
bfi \tcr, \tmp0, \pos, #3
|
||||
.endm
|
||||
|
||||
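For readers less comfortable with the assembler macro, the PARange-narrowing it performs can be rendered in C roughly as follows (illustrative sketch only, not part of the patch; the local PARANGE_* constants mirror the architected ID_AA64MMFR0_EL1.PARange encodings, and the PS field is treated as 3 bits wide as in the asm above):

#define PARANGE_SHIFT   0       /* PARange is the lowest field of ID_AA64MMFR0_EL1 */
#define PARANGE_48      0x5     /* encoding for a 48-bit physical address range */
#define PARANGE_52      0x6     /* encoding for a 52-bit physical address range */

static inline unsigned long long tcr_set_pa_size(unsigned long long tcr,
                                                 unsigned long long mmfr0,
                                                 unsigned int ps_shift,
                                                 int can_use_52bit_pa)
{
        unsigned long long parange = (mmfr0 >> PARANGE_SHIFT) & 0x7;
        unsigned long long limit = can_use_52bit_pa ? PARANGE_52 : PARANGE_48;

        if (parange > limit)            /* narrow PARange to fit the PS field */
                parange = limit;

        tcr &= ~(0x7ULL << ps_shift);
        return tcr | (parange << ps_shift);
}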
@@ -594,21 +598,13 @@ alternative_endif
 * ttbr: returns the TTBR value
 */
        .macro  phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
        orr     \ttbr, \phys, \phys, lsr #46
        and     \ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
        mov     \ttbr, \phys
#endif
        .endm

        .macro  phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
        orr     \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
        and     \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
        mov     \pte, \phys
#endif
        .endm

/*
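Both macros implement the same trick: with 52-bit physical addresses the extra PA bits [51:48] do not fit in the normal address field, so they are folded into spare low bits of the descriptor. A standalone C illustration for the 64K-page layout (values assumed from that configuration, not taken from the patch itself):

#include <stdint.h>

#define PTE_ADDR_LOW            0x0000ffffffff0000ULL   /* descriptor bits [47:16] */
#define PTE_ADDR_HIGH           0x000000000000f000ULL   /* bits [15:12] hold PA[51:48] */
#define PTE_ADDR_HIGH_SHIFT     36

static inline uint64_t phys_to_pte_val(uint64_t phys)
{
        /* fold PA[51:48] down into bits [15:12], then keep only address bits */
        return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) &
               (PTE_ADDR_LOW | PTE_ADDR_HIGH);
}

static inline uint64_t pte_val_to_phys(uint64_t pte)
{
        /* inverse: expand descriptor bits [15:12] back up to PA[51:48] */
        return (pte & PTE_ADDR_LOW) |
               ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}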
@@ -883,9 +883,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
         * However, by the "D10.1.4 Principles of the ID scheme
         * for fields in ID registers", ARM DDI 0487C.a, any new
         * value is guaranteed to be higher than what we know already.
         * As a safe limit, we return the limit supported by the kernel.
         */
        default: return CONFIG_ARM64_PA_BITS;
        default: return 52;
        }
}
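For context, the mapping this helper performs looks roughly like the following simplified sketch (architected PARange encodings; only the default case is what the hunk above changes):

static inline unsigned int parange_to_phys_shift(int parange)
{
        switch (parange) {
        case 0: return 32;
        case 1: return 36;
        case 2: return 40;
        case 3: return 42;
        case 4: return 44;
        case 5: return 48;
        case 6: return 52;
        /* unknown, newer encodings are guaranteed to be at least this large */
        default: return 52;
        }
}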
@@ -30,8 +30,7 @@

static inline u64 kvm_get_parange_max(void)
{
        if (kvm_lpa2_is_enabled() ||
            (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
        if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
                return ID_AA64MMFR0_EL1_PARANGE_52;
        else
                return ID_AA64MMFR0_EL1_PARANGE_48;
@@ -109,8 +109,5 @@ static inline bool kaslr_requires_kpti(void)
        return true;
}

#define INIT_MM_CONTEXT(name)   \
        .pgd = swapper_pg_dir,

#endif  /* !__ASSEMBLY__ */
#endif
@@ -176,7 +176,6 @@
#define PTE_SWBITS_MASK         _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))

#define PTE_ADDR_LOW            (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
#ifdef CONFIG_ARM64_64K_PAGES
#define PTE_ADDR_HIGH           (_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_HIGH_SHIFT     36
@@ -186,7 +185,6 @@
#define PTE_ADDR_HIGH_SHIFT     42
#define PHYS_TO_PTE_ADDR_MASK   GENMASK_ULL(49, 8)
#endif
#endif

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@ -222,12 +220,6 @@
|
||||
*/
|
||||
#define S1_TABLE_AP (_AT(pmdval_t, 3) << 61)
|
||||
|
||||
/*
|
||||
* Highest possible physical address supported.
|
||||
*/
|
||||
#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
|
||||
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
|
||||
|
||||
#define TTBR_CNP_BIT (UL(1) << 0)
|
||||
|
||||
/*
|
||||
@@ -333,12 +325,10 @@
/*
 * TTBR.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
/*
 * TTBR_ELx[1] is RES0 in this configuration.
 * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
 */
#define TTBR_BADDR_MASK_52      GENMASK_ULL(47, 2)
#endif

#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
@@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared;
#define lpa2_is_enabled()       false
#define PTE_MAYBE_SHARED        PTE_SHARED
#define PMD_MAYBE_SHARED        PMD_SECT_S
#define PHYS_MASK_SHIFT         (52)
#else
static inline bool __pure lpa2_is_enabled(void)
{
@@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void)

#define PTE_MAYBE_SHARED        (lpa2_is_enabled() ? 0 : PTE_SHARED)
#define PMD_MAYBE_SHARED        (lpa2_is_enabled() ? 0 : PMD_SECT_S)
#define PHYS_MASK_SHIFT         (lpa2_is_enabled() ? 52 : 48)
#endif

/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK               ((UL(1) << PHYS_MASK_SHIFT) - 1)

/*
 * If we have userspace only BTI we don't want to mark kernel pages
 * guarded even if the system does support BTI.
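The net effect of the two hunks above is that PHYS_MASK_SHIFT is no longer derived from CONFIG_ARM64_PA_BITS: one branch pins it at 52, the other selects 52 or 48 at runtime depending on whether LPA2 is in use. A minimal standalone illustration of the runtime variant (not the kernel macros themselves):

#include <stdbool.h>
#include <stdint.h>

static bool lpa2_enabled;       /* assumed to be set during early feature detection */

static inline unsigned int phys_mask_shift(void)
{
        return lpa2_enabled ? 52 : 48;
}

static inline uint64_t phys_mask(void)
{
        return (UINT64_C(1) << phys_mask_shift()) - 1;
}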
@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
        pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * Helpers to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
        pte_val(pte) &= ~PTE_MAYBE_SHARED;
@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
        return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)      (pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys) (phys)
#endif

#define pte_pfn(pte)            (__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)       \
@@ -896,7 +891,7 @@ static inline bool mm_pud_folded(const struct mm_struct *mm)
        pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)           (pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)            (pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
#define p4d_bad(p4d)            (pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT))
#define p4d_present(p4d)        (!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -1023,7 +1018,7 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm)
        pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)           (pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)            (pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
#define pgd_bad(pgd)            (pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT))
#define pgd_present(pgd)        (!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -1345,7 +1340,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
}

/*
 * __ptep_set_wrprotect - mark read-only while trasferring potential hardware
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)      (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)      (addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
@@ -5,7 +5,10 @@
#ifndef __ASM_SPARSEMEM_H
#define __ASM_SPARSEMEM_H

#define MAX_PHYSMEM_BITS        CONFIG_ARM64_PA_BITS
#include <asm/pgtable-prot.h>

#define MAX_PHYSMEM_BITS                PHYS_MASK_SHIFT
#define MAX_POSSIBLE_PHYSMEM_BITS       (52)

/*
 * Section size must be at least 512MB for 64K base
@@ -916,12 +916,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2         0x3
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX          0x7

#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_52
#else
#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_48
#endif

#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT            ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_LPA2             ID_AA64MMFR0_EL1_TGRAN4_52_BIT
@@ -3477,7 +3477,7 @@ static void verify_hyp_capabilities(void)
                return;

        safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
        mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);

        /* Verify VMID bits */
@@ -83,6 +83,15 @@ static bool __init mmfr2_varange_filter(u64 val)
                id_aa64mmfr0_override.val |=
                        (ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
                id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;

                /*
                 * Override PARange to 48 bits - the override will just be
                 * ignored if the actual PARange is smaller, but this is
                 * unlikely to be the case for LPA2 capable silicon.
                 */
                id_aa64mmfr0_override.val |=
                        ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
                id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
        }
#endif
        return true;
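The val/mask pair built here feeds the early ID-register override machinery. Roughly (simplified sketch; the real code applies overrides field by field with additional sanity checks), a masked override is consumed like this:

static inline unsigned long long apply_idreg_override(unsigned long long reg,
                                                      unsigned long long val,
                                                      unsigned long long mask)
{
        /* bits covered by the mask take the override value, the rest are kept */
        return (reg & ~mask) | (val & mask);
}

so setting both val and mask for PARange to the 48-bit encoding means later readers on this path see at most a 48-bit PA range, matching the comment in the hunk above.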
@@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
{
        u64 sctlr = read_sysreg(sctlr_el1);
        u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
        u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
        u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_EL1_PARANGE_SHIFT);

        tcr &= ~TCR_IPS_MASK;
        tcr |= parange << TCR_IPS_SHIFT;

        asm("   msr     sctlr_el1, %0   ;"
            "   isb                     ;"
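cpuid_feature_extract_unsigned_field() pulls a standard 4-bit ID register field out of the sampled register; a simplified sketch of the assumed behaviour:

static inline unsigned int extract_id_field(unsigned long long reg, unsigned int shift)
{
        return (reg >> shift) & 0xf;    /* ID register fields are 4 bits wide */
}

The extracted PARange encoding is then written straight into TCR_EL1.IPS, which uses the same encoding as PARange.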
@@ -1990,8 +1990,7 @@ static int kvm_init_vector_slots(void)

static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
        u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        unsigned long tcr;
        unsigned long tcr, ips;

        /*
         * Calculate the raw per-cpu offset without a translation from the
@@ -2005,6 +2004,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
        params->mair_el2 = read_sysreg(mair_el1);

        tcr = read_sysreg(tcr_el1);
        ips = FIELD_GET(TCR_IPS_MASK, tcr);
        if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
                tcr |= TCR_EPD1_MASK;
        } else {
@@ -2014,8 +2014,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= TCR_T0SZ(hyp_va_bits);
        tcr &= ~TCR_EL2_PS_MASK;
        tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
        if (kvm_lpa2_is_enabled())
        tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
        if (lpa2_is_enabled())
                tcr |= TCR_EL2_DS;
        params->tcr_el2 = tcr;
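Here the IPS value already programmed into TCR_EL1 is reused for TCR_EL2.PS instead of being recomputed from ID_AA64MMFR0_EL1. A toy example of the FIELD_GET()/FIELD_PREP() pattern involved, assuming the usual <linux/bitfield.h> semantics and the architectural field positions (IPS at TCR_EL1[34:32], PS at TCR_EL2[18:16]); the DEMO_* names are local to the sketch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TCR_IPS_MASK       GENMASK_ULL(34, 32)
#define DEMO_TCR_EL2_PS_MASK    GENMASK_ULL(18, 16)

static inline u64 propagate_ips_to_ps(u64 tcr_el1, u64 tcr_el2)
{
        u64 ips = FIELD_GET(DEMO_TCR_IPS_MASK, tcr_el1);        /* extract IPS */

        tcr_el2 &= ~DEMO_TCR_EL2_PS_MASK;
        return tcr_el2 | FIELD_PREP(DEMO_TCR_EL2_PS_MASK, ips);
}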
@@ -35,14 +35,6 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
        return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
}

static bool kvm_phys_is_valid(u64 phys)
{
        u64 parange_max = kvm_get_parange_max();
        u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);

        return phys < BIT(shift);
}

static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
        u64 granule = kvm_granule_size(ctx->level);
@@ -53,7 +45,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
        if (granule > (ctx->end - ctx->addr))
                return false;

        if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
        if (!IS_ALIGNED(phys, granule))
                return false;

        return IS_ALIGNED(ctx->addr, granule);
@@ -587,6 +579,9 @@ struct stage2_map_data {

        /* Force mappings to page granularity */
        bool force_pte;

        /* Walk should update owner_id only */
        bool annotation;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
@@ -885,18 +880,7 @@ static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
{
        u64 phys = data->phys;

        /*
         * Stage-2 walks to update ownership data are communicated to the map
         * walker using an invalid PA. Avoid offsetting an already invalid PA,
         * which could overflow and make the address valid again.
         */
        if (!kvm_phys_is_valid(phys))
                return phys;

        /*
         * Otherwise, work out the correct PA based on how far the walk has
         * gotten.
         */
        /* Work out the correct PA based on how far the walk has gotten */
        return phys + (ctx->addr - ctx->start);
}
@@ -908,6 +892,9 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
        if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
                return false;

        if (data->annotation)
                return true;

        return kvm_block_mapping_supported(ctx, phys);
}
@@ -923,7 +910,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
        if (!stage2_leaf_mapping_allowed(ctx, data))
                return -E2BIG;

        if (kvm_phys_is_valid(phys))
        if (!data->annotation)
                new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        else
                new = kvm_init_invalid_leaf_owner(data->owner_id);
@@ -1085,11 +1072,11 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
{
        int ret;
        struct stage2_map_data map_data = {
                .phys           = KVM_PHYS_INVALID,
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .owner_id       = owner_id,
                .force_pte      = true,
                .annotation     = true,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
@@ -519,6 +519,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

static int __init hugetlbpage_init(void)
{
        /*
         * HugeTLB pages are supported on maximum four page table
         * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
         * page size, corresponding to hugetlb_add_hstate() calls
         * here.
         *
         * HUGE_MAX_HSTATE should at least match maximum supported
         * HugeTLB page sizes on the platform. Any new addition to
         * supported HugeTLB page sizes will also require changing
         * HUGE_MAX_HSTATE as well.
         */
        BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
@@ -279,7 +279,12 @@ void __init arm64_memblock_init(void)

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
                u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);

                /*
                 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
                 * map randomization can be enabled by shrinking the IPA space.
                 */
                u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
                int parange = cpuid_feature_extract_unsigned_field(
                                        mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
                s64 range = linear_region_size -
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)

void __init pgtable_cache_init(void)
{
        unsigned int pgd_size = PGD_SIZE;

        if (pgdir_is_page_size())
                return;

#ifdef CONFIG_ARM64_PA_BITS_52
        /*
         * With 52-bit physical addresses, the architecture requires the
         * top-level table to be aligned to at least 64 bytes.
         */
        BUILD_BUG_ON(PGD_SIZE < 64);
#endif
        if (PHYS_MASK_SHIFT >= 52)
                pgd_size = max(pgd_size, 64);

        /*
         * Naturally aligned pgds required by the architecture.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
        pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
                                      SLAB_PANIC, NULL);
}
@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)

        .macro  pte_to_phys, phys, pte
        and     \phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
        and     \pte, \pte, #PTE_ADDR_HIGH
        orr     \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
        .endm

        .macro  kpti_mk_tbl_ng, type, num_entries
@@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
LX_CONFIG(CONFIG_ARM64_16K_PAGES)
LX_CONFIG(CONFIG_ARM64_64K_PAGES)
if IS_BUILTIN(CONFIG_ARM64):
    LX_VALUE(CONFIG_ARM64_PA_BITS)
    LX_VALUE(CONFIG_ARM64_VA_BITS)
    LX_VALUE(CONFIG_PAGE_SHIFT)
    LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
@@ -574,12 +574,6 @@
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN  0x2
#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX  0x7

#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_52
#else
#define ID_AA64MMFR0_EL1_PARANGE_MAX    ID_AA64MMFR0_EL1_PARANGE_48
#endif

#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_EL1_TGRAN_SHIFT            ID_AA64MMFR0_EL1_TGRAN4_SHIFT
#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN    ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN