Merge branch 'for-next/core' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

commit 7bdd902c16
@@ -449,6 +449,18 @@ Before jumping into the kernel, the following conditions must be met:
 
     - HFGWTR_EL2.nGCS_EL0 (bit 52) must be initialised to 0b1.
 
+  - For CPUs with debug architecture i.e FEAT_Debugv8pN (all versions):
+
+    - If EL3 is present:
+
+      - MDCR_EL3.TDA (bit 9) must be initialized to 0b0
+
+  - For CPUs with FEAT_PMUv3:
+
+    - If EL3 is present:
+
+      - MDCR_EL3.TPM (bit 6) must be initialized to 0b0
+
 The requirements described above for CPU mode, caches, MMUs, architected
 timers, coherency and system registers apply to all CPUs. All CPUs must
 enter the kernel in the same exception level. Where the values documented
@@ -113,7 +113,7 @@ config ARM64
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
        select ARCH_WANT_LD_ORPHAN_WARN
-       select ARCH_WANTS_EXECMEM_LATE if EXECMEM
+       select ARCH_WANTS_EXECMEM_LATE
        select ARCH_WANTS_NO_INSTR
        select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
        select ARCH_HAS_UBSAN
@@ -1379,7 +1379,6 @@ config ARM64_VA_BITS_48
 
 config ARM64_VA_BITS_52
        bool "52-bit"
-       depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
        help
          Enable 52-bit virtual addressing for userspace when explicitly
          requested via a hint to mmap(). The kernel will also use 52-bit
@@ -1417,39 +1416,9 @@ config ARM64_VA_BITS
        default 48 if ARM64_VA_BITS_48
        default 52 if ARM64_VA_BITS_52
 
-choice
-       prompt "Physical address space size"
-       default ARM64_PA_BITS_48
-       help
-         Choose the maximum physical address range that the kernel will
-         support.
-
-config ARM64_PA_BITS_48
-       bool "48-bit"
-       depends on ARM64_64K_PAGES || !ARM64_VA_BITS_52
-
-config ARM64_PA_BITS_52
-       bool "52-bit"
-       depends on ARM64_64K_PAGES || ARM64_VA_BITS_52
-       depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
-       help
-         Enable support for a 52-bit physical address space, introduced as
-         part of the ARMv8.2-LPA extension.
-
-         With this enabled, the kernel will also continue to work on CPUs that
-         do not support ARMv8.2-LPA, but with some added memory overhead (and
-         minor performance overhead).
-
-endchoice
-
-config ARM64_PA_BITS
-       int
-       default 48 if ARM64_PA_BITS_48
-       default 52 if ARM64_PA_BITS_52
-
 config ARM64_LPA2
        def_bool y
-       depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+       depends on !ARM64_64K_PAGES
 
 choice
        prompt "Endianness"
@@ -1681,6 +1650,7 @@ config RODATA_FULL_DEFAULT_ENABLED
 
 config ARM64_SW_TTBR0_PAN
        bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
        depends on !KCSAN
+       select ARM64_PAN
        help
          Enabling this option prevents the kernel from accessing
          user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1937,7 +1907,6 @@ config ARM64_RAS_EXTN
 
 config ARM64_CNP
        bool "Enable support for Common Not Private (CNP) translations"
        default y
-       depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
        help
          Common Not Private (CNP) allows translation table entries to
          be shared between different PEs in the same inner shareable
@@ -2132,7 +2101,7 @@ config ARM64_MTE
        depends on AS_HAS_ARMV8_5
        depends on AS_HAS_LSE_ATOMICS
        # Required for tag checking in the uaccess routines
-       depends on ARM64_PAN
+       select ARM64_PAN
        select ARCH_HAS_SUBPAGE_FAULTS
        select ARCH_USES_HIGH_VMA_FLAGS
        select ARCH_USES_PG_ARCH_2
@@ -342,9 +342,13 @@ alternative_cb_end
        mrs     \tmp0, ID_AA64MMFR0_EL1
        // Narrow PARange to fit the PS field in TCR_ELx
        ubfx    \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
        mov     \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
+#ifdef CONFIG_ARM64_LPA2
+alternative_if_not ARM64_HAS_VA52
+       mov     \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
+alternative_else_nop_endif
+#endif
        cmp     \tmp0, \tmp1
        csel    \tmp0, \tmp1, \tmp0, hi
        bfi     \tcr, \tmp0, \pos, #3
        .endm

@@ -594,21 +598,13 @@ alternative_endif
  * ttbr: returns the TTBR value
  */
        .macro  phys_to_ttbr, ttbr, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
        orr     \ttbr, \phys, \phys, lsr #46
        and     \ttbr, \ttbr, #TTBR_BADDR_MASK_52
-#else
-       mov     \ttbr, \phys
-#endif
        .endm
 
        .macro  phys_to_pte, pte, phys
-#ifdef CONFIG_ARM64_PA_BITS_52
        orr     \pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
        and     \pte, \pte, #PHYS_TO_PTE_ADDR_MASK
-#else
-       mov     \pte, \phys
-#endif
        .endm
 
 /*
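Context for the two macros above: they fold PA bits [51:48] into the low bits of the descriptor, and the folded bits are zero for any sub-48-bit address, which is what lets the old !CONFIG_ARM64_PA_BITS_52 fallback (a plain mov) be dropped. Below is a minimal userspace C sketch of the same bit arithmetic, not kernel code; it assumes the 64K-page constants from the pgtable-hwdef.h hunk later in this diff (PTE_ADDR_HIGH_SHIFT == 36, TTBR_BADDR_MASK_52 == GENMASK_ULL(47, 2)), and all sketch_* names are invented for the demo.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_ADDR_LOW    ((((uint64_t)1 << (50 - 16)) - 1) << 16) /* PA[49:16] */
#define SKETCH_PTE_ADDR_HIGH   ((uint64_t)0xf << 12)                    /* PA[51:48], folded to [15:12] */
#define SKETCH_TTBR_BADDR_MASK ((((uint64_t)1 << 46) - 1) << 2)         /* GENMASK_ULL(47, 2) */

static uint64_t sketch_phys_to_pte(uint64_t phys)
{
        /* the orr/and pair from phys_to_pte: PA[51:48] lands in PTE[15:12] */
        return (phys | (phys >> 36)) & (SKETCH_PTE_ADDR_LOW | SKETCH_PTE_ADDR_HIGH);
}

static uint64_t sketch_phys_to_ttbr(uint64_t phys)
{
        /* the orr/and pair from phys_to_ttbr: PA[51:48] lands in TTBR[5:2] */
        return (phys | (phys >> 46)) & SKETCH_TTBR_BADDR_MASK;
}

int main(void)
{
        uint64_t phys = 0x000f123456780000ULL;  /* a 52-bit, 64K-aligned PA */

        printf("pte:  0x%016llx\n", (unsigned long long)sketch_phys_to_pte(phys));
        printf("ttbr: 0x%016llx\n", (unsigned long long)sketch_phys_to_ttbr(phys));
        return 0;
}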
@@ -46,6 +46,8 @@ cpucap_is_possible(const unsigned int cap)
                return IS_ENABLED(CONFIG_ARM64_POE);
        case ARM64_HAS_GCS:
                return IS_ENABLED(CONFIG_ARM64_GCS);
+       case ARM64_HAFT:
+               return IS_ENABLED(CONFIG_ARM64_HAFT);
        case ARM64_UNMAP_KERNEL_AT_EL0:
                return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
        case ARM64_WORKAROUND_843419:

@@ -852,8 +852,7 @@ static inline bool system_supports_gcs(void)
 
 static inline bool system_supports_haft(void)
 {
-       return IS_ENABLED(CONFIG_ARM64_HAFT) &&
-               cpus_have_final_cap(ARM64_HAFT);
+       return cpus_have_final_cap(ARM64_HAFT);
 }
 
 static __always_inline bool system_supports_mpam(void)

@@ -884,9 +883,8 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
         * However, by the "D10.1.4 Principles of the ID scheme
         * for fields in ID registers", ARM DDI 0487C.a, any new
         * value is guaranteed to be higher than what we know already.
         * As a safe limit, we return the limit supported by the kernel.
         */
-       default: return CONFIG_ARM64_PA_BITS;
+       default: return 52;
        }
 }
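For reference only (the switch body is elided by the hunk): the helper above maps the architected ID_AA64MMFR0_EL1.PARange encodings to PA widths. A sketch of that mapping using the values from the Arm ARM, with the default now clamped to the kernel's fixed 52-bit limit instead of the removed CONFIG_ARM64_PA_BITS; the function name is invented:

static inline unsigned int sketch_parange_to_phys_shift(unsigned int parange)
{
        switch (parange) {
        case 0x0: return 32;
        case 0x1: return 36;
        case 0x2: return 40;
        case 0x3: return 42;
        case 0x4: return 44;
        case 0x5: return 48;
        case 0x6: return 52;
        default:  return 52;    /* unknown encodings are guaranteed larger; clamp */
        }
}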
@@ -154,7 +154,7 @@
 /* Coprocessor traps */
 .macro __init_el2_cptr
        __check_hvhe .LnVHE_\@, x1
-       mov     x0, #CPACR_ELx_FPEN
+       mov     x0, #CPACR_EL1_FPEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
 .LnVHE_\@:

@@ -332,7 +332,7 @@
 
        // (h)VHE case
        mrs     x0, cpacr_el1           // Disable SVE traps
-       orr     x0, x0, #CPACR_ELx_ZEN
+       orr     x0, x0, #CPACR_EL1_ZEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@

@@ -353,7 +353,7 @@
 
        // (h)VHE case
        mrs     x0, cpacr_el1           // Disable SME traps
-       orr     x0, x0, #CPACR_ELx_SMEN
+       orr     x0, x0, #CPACR_EL1_SMEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_sme_\@
@@ -391,8 +391,6 @@
                ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
                ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
 
-#define CPACR_EL1_TTA          (1 << 28)
-
 #define kvm_mode_names                         \
        { PSR_MODE_EL0t,        "EL0t" },       \
        { PSR_MODE_EL1t,        "EL1t" },       \
@@ -556,13 +556,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        ({                                              \
                u64 cptr = 0;                           \
                                                        \
-               if ((set) & CPACR_ELx_FPEN)             \
+               if ((set) & CPACR_EL1_FPEN)             \
                        cptr |= CPTR_EL2_TFP;           \
-               if ((set) & CPACR_ELx_ZEN)              \
+               if ((set) & CPACR_EL1_ZEN)              \
                        cptr |= CPTR_EL2_TZ;            \
-               if ((set) & CPACR_ELx_SMEN)             \
+               if ((set) & CPACR_EL1_SMEN)             \
                        cptr |= CPTR_EL2_TSM;           \
-               if ((clr) & CPACR_ELx_TTA)              \
+               if ((clr) & CPACR_EL1_TTA)              \
                        cptr |= CPTR_EL2_TTA;           \
                if ((clr) & CPTR_EL2_TAM)               \
                        cptr |= CPTR_EL2_TAM;           \

@@ -576,13 +576,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
        ({                                              \
                u64 cptr = 0;                           \
                                                        \
-               if ((clr) & CPACR_ELx_FPEN)             \
+               if ((clr) & CPACR_EL1_FPEN)             \
                        cptr |= CPTR_EL2_TFP;           \
-               if ((clr) & CPACR_ELx_ZEN)              \
+               if ((clr) & CPACR_EL1_ZEN)              \
                        cptr |= CPTR_EL2_TZ;            \
-               if ((clr) & CPACR_ELx_SMEN)             \
+               if ((clr) & CPACR_EL1_SMEN)             \
                        cptr |= CPTR_EL2_TSM;           \
-               if ((set) & CPACR_ELx_TTA)              \
+               if ((set) & CPACR_EL1_TTA)              \
                        cptr |= CPTR_EL2_TTA;           \
                if ((set) & CPTR_EL2_TAM)               \
                        cptr |= CPTR_EL2_TAM;           \

@@ -595,13 +595,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
 #define cpacr_clear_set(clr, set)                              \
        do {                                                    \
                BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);        \
-               BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);          \
-               __build_check_all_or_none((clr), CPACR_ELx_FPEN); \
-               __build_check_all_or_none((set), CPACR_ELx_FPEN); \
-               __build_check_all_or_none((clr), CPACR_ELx_ZEN); \
-               __build_check_all_or_none((set), CPACR_ELx_ZEN); \
-               __build_check_all_or_none((clr), CPACR_ELx_SMEN); \
-               __build_check_all_or_none((set), CPACR_ELx_SMEN); \
+               BUILD_BUG_ON((clr) & CPACR_EL1_E0POE);          \
+               __build_check_all_or_none((clr), CPACR_EL1_FPEN); \
+               __build_check_all_or_none((set), CPACR_EL1_FPEN); \
+               __build_check_all_or_none((clr), CPACR_EL1_ZEN); \
+               __build_check_all_or_none((set), CPACR_EL1_ZEN); \
+               __build_check_all_or_none((clr), CPACR_EL1_SMEN); \
+               __build_check_all_or_none((set), CPACR_EL1_SMEN); \
                                                                \
                if (has_vhe() || has_hvhe())                    \
                        sysreg_clear_set(cpacr_el1, clr, set);  \

@@ -624,16 +624,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
        u64 val;
 
        if (has_vhe()) {
-               val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
+               val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
-               val = CPACR_ELx_FPEN;
+               val = CPACR_EL1_FPEN;
 
                if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
-                       val |= CPACR_ELx_ZEN;
+                       val |= CPACR_EL1_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
-                       val |= CPACR_ELx_SMEN;
+                       val |= CPACR_EL1_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;

@@ -685,7 +685,7 @@ static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
 #define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)           \
        (!vcpu_has_nv(vcpu) ? false :                           \
                ____cptr_xen_trap_enabled(vcpu,                 \
-                                         SYS_FIELD_GET(CPACR_ELx, xen, \
+                                         SYS_FIELD_GET(CPACR_EL1, xen, \
                                                        vcpu_sanitised_cptr_el2(vcpu))))
 
 static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
@@ -33,14 +33,14 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
 
 static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
 {
-       u64 cpacr_el1 = CPACR_ELx_RES1;
+       u64 cpacr_el1 = CPACR_EL1_RES1;
 
        if (cptr_el2 & CPTR_EL2_TTA)
-               cpacr_el1 |= CPACR_ELx_TTA;
+               cpacr_el1 |= CPACR_EL1_TTA;
        if (!(cptr_el2 & CPTR_EL2_TFP))
-               cpacr_el1 |= CPACR_ELx_FPEN;
+               cpacr_el1 |= CPACR_EL1_FPEN;
        if (!(cptr_el2 & CPTR_EL2_TZ))
-               cpacr_el1 |= CPACR_ELx_ZEN;
+               cpacr_el1 |= CPACR_EL1_ZEN;
 
        cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
@@ -30,8 +30,7 @@
 
 static inline u64 kvm_get_parange_max(void)
 {
-       if (kvm_lpa2_is_enabled() ||
-           (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+       if (kvm_lpa2_is_enabled() || PAGE_SHIFT == 16)
                return ID_AA64MMFR0_EL1_PARANGE_52;
        else
                return ID_AA64MMFR0_EL1_PARANGE_48;
@@ -109,8 +109,5 @@ static inline bool kaslr_requires_kpti(void)
        return true;
 }
 
-#define INIT_MM_CONTEXT(name)  \
-       .pgd = swapper_pg_dir,
-
 #endif /* !__ASSEMBLY__ */
 #endif
@@ -176,7 +176,6 @@
 #define PTE_SWBITS_MASK                _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
 
 #define PTE_ADDR_LOW           (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
-#ifdef CONFIG_ARM64_PA_BITS_52
 #ifdef CONFIG_ARM64_64K_PAGES
 #define PTE_ADDR_HIGH          (_AT(pteval_t, 0xf) << 12)
 #define PTE_ADDR_HIGH_SHIFT    36

@@ -186,7 +185,6 @@
 #define PTE_ADDR_HIGH_SHIFT    42
 #define PHYS_TO_PTE_ADDR_MASK  GENMASK_ULL(49, 8)
 #endif
-#endif
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).

@@ -222,12 +220,6 @@
  */
 #define S1_TABLE_AP            (_AT(pmdval_t, 3) << 61)
 
-/*
- * Highest possible physical address supported.
- */
-#define PHYS_MASK_SHIFT                (CONFIG_ARM64_PA_BITS)
-#define PHYS_MASK              ((UL(1) << PHYS_MASK_SHIFT) - 1)
-
 #define TTBR_CNP_BIT           (UL(1) << 0)
 
 /*

@@ -333,12 +325,10 @@
 /*
  * TTBR.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 /*
- * TTBR_ELx[1] is RES0 in this configuration.
+ * TTBR_ELx[1] is RES0 when using 52-bit physical addressing
  */
 #define TTBR_BADDR_MASK_52     GENMASK_ULL(47, 2)
-#endif
 
 #ifdef CONFIG_ARM64_VA_BITS_52
 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */
@@ -81,6 +81,7 @@ extern unsigned long prot_ns_shared;
 #define lpa2_is_enabled()      false
 #define PTE_MAYBE_SHARED       PTE_SHARED
 #define PMD_MAYBE_SHARED       PMD_SECT_S
+#define PHYS_MASK_SHIFT                (52)
 #else
 static inline bool __pure lpa2_is_enabled(void)
 {

@@ -89,8 +90,14 @@ static inline bool __pure lpa2_is_enabled(void)
 
 #define PTE_MAYBE_SHARED       (lpa2_is_enabled() ? 0 : PTE_SHARED)
 #define PMD_MAYBE_SHARED       (lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#define PHYS_MASK_SHIFT                (lpa2_is_enabled() ? 52 : 48)
 #endif
 
+/*
+ * Highest possible physical address supported.
+ */
+#define PHYS_MASK              ((UL(1) << PHYS_MASK_SHIFT) - 1)
+
 /*
  * If we have userspace only BTI we don't want to mark kernel pages
  * guarded even if the system does support BTI.
@@ -69,10 +69,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
        pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
 
 /*
- * Macros to convert between a physical address and its placement in a
+ * Helpers to convert between a physical address and its placement in a
  * page table entry, taking care of 52-bit addresses.
  */
-#ifdef CONFIG_ARM64_PA_BITS_52
 static inline phys_addr_t __pte_to_phys(pte_t pte)
 {
        pte_val(pte) &= ~PTE_MAYBE_SHARED;

@@ -83,10 +82,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 {
        return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
 }
-#else
-#define __pte_to_phys(pte)     (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys)        (phys)
-#endif
 
 #define pte_pfn(pte)           (__pte_to_phys(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn,prot)      \

@@ -896,7 +891,7 @@ static inline bool mm_pud_folded(const struct mm_struct *mm)
        pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
 
 #define p4d_none(p4d)          (pgtable_l4_enabled() && !p4d_val(p4d))
-#define p4d_bad(p4d)           (pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
+#define p4d_bad(p4d)           (pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT))
 #define p4d_present(p4d)       (!p4d_none(p4d))
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)

@@ -1023,7 +1018,7 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm)
        pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))
 
 #define pgd_none(pgd)          (pgtable_l5_enabled() && !pgd_val(pgd))
-#define pgd_bad(pgd)           (pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
+#define pgd_bad(pgd)           (pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT))
 #define pgd_present(pgd)       (!pgd_none(pgd))
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)

@@ -1345,7 +1340,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
 }
 
 /*
- * __ptep_set_wrprotect - mark read-only while trasferring potential hardware
+ * __ptep_set_wrprotect - mark read-only while transferring potential hardware
  * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
  */
 static inline void __ptep_set_wrprotect(struct mm_struct *mm,

@@ -1495,11 +1490,7 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
-#else
-#define phys_to_ttbr(addr)     (addr)
-#endif
 
 /*
  * On arm64 without hardware Access Flag, copying from user will fail because
@@ -5,7 +5,10 @@
 #ifndef __ASM_SPARSEMEM_H
 #define __ASM_SPARSEMEM_H
 
-#define MAX_PHYSMEM_BITS       CONFIG_ARM64_PA_BITS
+#include <asm/pgtable-prot.h>
+
+#define MAX_PHYSMEM_BITS               PHYS_MASK_SHIFT
+#define MAX_POSSIBLE_PHYSMEM_BITS      (52)
 
 /*
  * Section size must be at least 512MB for 64K base
@@ -916,12 +916,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2        0x3
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_48
-#endif
 
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT   ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_LPA2    ID_AA64MMFR0_EL1_TGRAN4_52_BIT
@@ -1004,17 +1004,16 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
                                /* Override was valid */
                                ftr_new = tmp;
                                str = "forced";
-                       } else if (ftr_ovr == tmp) {
+                       } else {
                                /* Override was the safe value */
                                str = "already set";
                        }
 
-                       if (str)
-                               pr_warn("%s[%d:%d]: %s to %llx\n",
-                                       reg->name,
-                                       ftrp->shift + ftrp->width - 1,
-                                       ftrp->shift, str,
-                                       tmp & (BIT(ftrp->width) - 1));
+                       pr_warn("%s[%d:%d]: %s to %llx\n",
+                               reg->name,
+                               ftrp->shift + ftrp->width - 1,
+                               ftrp->shift, str,
+                               tmp & (BIT(ftrp->width) - 1));
                } else if ((ftr_mask & reg->override->val) == ftr_mask) {
                        reg->override->val &= ~ftr_mask;
                        pr_warn("%s[%d:%d]: impossible override, ignored\n",

@@ -2376,8 +2375,8 @@ static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
 #ifdef CONFIG_ARM64_POE
 static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused)
 {
-       sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE);
-       sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE);
+       sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1_E0POE);
+       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_E0POE);
 }
 #endif

@@ -3478,7 +3477,7 @@ static void verify_hyp_capabilities(void)
                return;
 
        safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-       mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+       mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
 
        /* Verify VMID bits */
@@ -83,6 +83,15 @@ static bool __init mmfr2_varange_filter(u64 val)
                id_aa64mmfr0_override.val |=
                        (ID_AA64MMFR0_EL1_TGRAN_LPA2 - 1) << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
                id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_TGRAN_SHIFT;
+
+               /*
+                * Override PARange to 48 bits - the override will just be
+                * ignored if the actual PARange is smaller, but this is
+                * unlikely to be the case for LPA2 capable silicon.
+                */
+               id_aa64mmfr0_override.val |=
+                       ID_AA64MMFR0_EL1_PARANGE_48 << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
+               id_aa64mmfr0_override.mask |= 0xfU << ID_AA64MMFR0_EL1_PARANGE_SHIFT;
        }
 #endif
        return true;
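The hunk above relies on the val/mask convention of the boot-time ID-register override machinery: only the bits set in .mask are replaced, everything else comes from the CPU. A hypothetical stand-alone helper showing how such an override is consumed (struct and function are invented for illustration, not the kernel's API):

#include <stdint.h>

struct sketch_override {
        uint64_t val;   /* replacement bits */
        uint64_t mask;  /* which bits the override covers */
};

static uint64_t sketch_apply_override(uint64_t hw, const struct sketch_override *ovr)
{
        /* bits outside ->mask come from the CPU, bits inside from ->val */
        return (hw & ~ovr->mask) | (ovr->val & ovr->mask);
}

Here the 4-bit PARange field is forced to the 48-bit encoding; as the added comment notes, a CPU that actually reports a smaller PARange simply makes the override a no-op.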
@@ -136,6 +136,12 @@ static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
 {
        u64 sctlr = read_sysreg(sctlr_el1);
        u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
+       u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+       u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
+                                       ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+       tcr &= ~TCR_IPS_MASK;
+       tcr |= parange << TCR_IPS_SHIFT;
 
        asm("   msr     sctlr_el1, %0   ;"
            "   isb                     ;"
@@ -1990,8 +1990,7 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
        struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-       u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-       unsigned long tcr;
+       unsigned long tcr, ips;
 
        /*
         * Calculate the raw per-cpu offset without a translation from the

@@ -2005,6 +2004,7 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
        params->mair_el2 = read_sysreg(mair_el1);
 
        tcr = read_sysreg(tcr_el1);
+       ips = FIELD_GET(TCR_IPS_MASK, tcr);
        if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
                tcr |= TCR_EPD1_MASK;
        } else {

@@ -2014,8 +2014,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= TCR_T0SZ(hyp_va_bits);
        tcr &= ~TCR_EL2_PS_MASK;
-       tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
-       if (kvm_lpa2_is_enabled())
+       tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
+       if (lpa2_is_enabled())
                tcr |= TCR_EL2_DS;
        params->tcr_el2 = tcr;
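After this change, TCR_EL2.PS is seeded from the host's own TCR_EL1.IPS instead of being recomputed from PARange. A sketch of the FIELD_GET()/FIELD_PREP() dance, assuming the architectural field positions (IPS at TCR_EL1[34:32], PS at TCR_EL2[18:16]); the SKETCH_* masks are local stand-ins rather than the kernel's macros:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SKETCH_TCR_EL1_IPS     GENMASK_ULL(34, 32)
#define SKETCH_TCR_EL2_PS      GENMASK_ULL(18, 16)

static inline u64 sketch_copy_ips_to_ps(u64 tcr_el1, u64 tcr_el2)
{
        u64 ips = FIELD_GET(SKETCH_TCR_EL1_IPS, tcr_el1);       /* extract and right-justify */

        tcr_el2 &= ~SKETCH_TCR_EL2_PS;                          /* clear the target field */
        return tcr_el2 | FIELD_PREP(SKETCH_TCR_EL2_PS, ips);    /* shift value into place */
}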
@@ -111,7 +111,7 @@ static bool s1pie_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
                return vcpu_read_sys_reg(vcpu, TCR2_EL2) & TCR2_EL2_PIE;
        case TR_EL10:
                return  (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TCR2En) &&
-                       (__vcpu_sys_reg(vcpu, TCR2_EL1) & TCR2_EL1x_PIE);
+                       (__vcpu_sys_reg(vcpu, TCR2_EL1) & TCR2_EL1_PIE);
        default:
                BUG();
        }

@@ -140,8 +140,8 @@ static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
                }
 
                val = __vcpu_sys_reg(vcpu, TCR2_EL1);
-               wi->poe = val & TCR2_EL1x_POE;
-               wi->e0poe = val & TCR2_EL1x_E0POE;
+               wi->poe = val & TCR2_EL1_POE;
+               wi->e0poe = val & TCR2_EL1_E0POE;
        }
 }
@@ -494,7 +494,7 @@ static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
        if (!vcpu_el2_e2h_is_set(vcpu))
                val = translate_cptr_el2_to_cpacr_el1(val);
 
-       if (val & CPACR_ELx_TTA)
+       if (val & CPACR_EL1_TTA)
                return BEHAVE_FORWARD_RW;
 
        return BEHAVE_HANDLE_LOCALLY;
@@ -169,7 +169,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
        if (has_vhe() && system_supports_sme()) {
                /* Also restore EL0 state seen on entry */
                if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
-                       sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN);
                else
                        sysreg_clear_set(CPACR_EL1,
                                         CPACR_EL1_SMEN_EL0EN,
@@ -419,9 +419,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 
        /* First disable enough traps to allow us to update the registers */
        if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve()))
-               cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+               cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
        else
-               cpacr_clear_set(0, CPACR_ELx_FPEN);
+               cpacr_clear_set(0, CPACR_EL1_FPEN);
        isb();
 
        /* Write out the host state if it's in the registers */
@@ -68,7 +68,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
        if (!guest_owns_fp_regs())
                return;
 
-       cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+       cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN);
        isb();
 
        if (vcpu_has_sve(vcpu))

@@ -481,7 +481,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
                handle_host_smc(host_ctxt);
                break;
        case ESR_ELx_EC_SVE:
-               cpacr_clear_set(0, CPACR_ELx_ZEN);
+               cpacr_clear_set(0, CPACR_EL1_ZEN);
                isb();
                sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
                                       SYS_ZCR_EL2);
@@ -68,7 +68,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
        /* Trap SVE */
        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
                if (has_hvhe())
-                       cptr_clear |= CPACR_ELx_ZEN;
+                       cptr_clear |= CPACR_EL1_ZEN;
                else
                        cptr_set |= CPTR_EL2_TZ;
        }
@@ -48,14 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
                if (cpus_have_final_cap(ARM64_SME)) {
                        if (has_hvhe())
-                               val &= ~CPACR_ELx_SMEN;
+                               val &= ~CPACR_EL1_SMEN;
                        else
                                val |= CPTR_EL2_TSM;
                }
 
                if (!guest_owns_fp_regs()) {
                        if (has_hvhe())
-                               val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+                               val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
                        else
                                val |= CPTR_EL2_TFP | CPTR_EL2_TZ;

@@ -192,7 +192,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 
                /* Re-enable SVE traps if not supported for the guest vcpu. */
                if (!vcpu_has_sve(vcpu))
-                       cpacr_clear_set(CPACR_ELx_ZEN, 0);
+                       cpacr_clear_set(CPACR_EL1_ZEN, 0);
 
        } else {
                __fpsimd_save_state(*host_data_ptr(fpsimd_state));
@@ -35,14 +35,6 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
        return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
 }
 
-static bool kvm_phys_is_valid(u64 phys)
-{
-       u64 parange_max = kvm_get_parange_max();
-       u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
-
-       return phys < BIT(shift);
-}
-
 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
 {
        u64 granule = kvm_granule_size(ctx->level);

@@ -53,7 +45,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
        if (granule > (ctx->end - ctx->addr))
                return false;
 
-       if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
+       if (!IS_ALIGNED(phys, granule))
                return false;
 
        return IS_ALIGNED(ctx->addr, granule);

@@ -587,6 +579,9 @@ struct stage2_map_data {
 
        /* Force mappings to page granularity */
        bool force_pte;
+
+       /* Walk should update owner_id only */
+       bool annotation;
 };
 
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)

@@ -885,18 +880,7 @@ static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
 {
        u64 phys = data->phys;
 
-       /*
-        * Stage-2 walks to update ownership data are communicated to the map
-        * walker using an invalid PA. Avoid offsetting an already invalid PA,
-        * which could overflow and make the address valid again.
-        */
-       if (!kvm_phys_is_valid(phys))
-               return phys;
-
-       /*
-        * Otherwise, work out the correct PA based on how far the walk has
-        * gotten.
-        */
+       /* Work out the correct PA based on how far the walk has gotten */
        return phys + (ctx->addr - ctx->start);
 }

@@ -908,6 +892,9 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
        if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
                return false;
 
+       if (data->annotation)
+               return true;
+
        return kvm_block_mapping_supported(ctx, phys);
 }

@@ -923,7 +910,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
        if (!stage2_leaf_mapping_allowed(ctx, data))
                return -E2BIG;
 
-       if (kvm_phys_is_valid(phys))
+       if (!data->annotation)
                new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        else
                new = kvm_init_invalid_leaf_owner(data->owner_id);

@@ -1085,11 +1072,11 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
 {
        int ret;
        struct stage2_map_data map_data = {
-               .phys           = KVM_PHYS_INVALID,
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .owner_id       = owner_id,
                .force_pte      = true,
+               .annotation     = true,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
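The hunks above replace an in-band sentinel (KVM_PHYS_INVALID) with the explicit annotation flag: the walker no longer needs a range check to recognise ownership-only walks, and there is no risk of "sentinel + offset" wrapping back into the valid PA range. A generic sketch of the pattern, with all types and names invented for illustration:

#include <stdbool.h>
#include <stdint.h>

struct sketch_map_data {
        uint64_t phys;          /* only meaningful when !annotation */
        bool     annotation;    /* walk updates ownership metadata only */
};

static uint64_t sketch_walk_phys(const struct sketch_map_data *d,
                                 uint64_t addr, uint64_t start)
{
        /*
         * Safe to offset unconditionally: an annotation walk never
         * consumes the result, so no sentinel check is needed and no
         * overflow can turn a bogus address into a valid-looking one.
         */
        return d->phys + (addr - start);
}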
@@ -77,12 +77,12 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
         * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
         * shift value for trapping the AMU accesses.
         */
-       u64 val = CPACR_ELx_TTA | CPTR_EL2_TAM;
+       u64 val = CPACR_EL1_TTA | CPTR_EL2_TAM;
 
        if (guest_owns_fp_regs()) {
-               val |= CPACR_ELx_FPEN;
+               val |= CPACR_EL1_FPEN;
                if (vcpu_has_sve(vcpu))
-                       val |= CPACR_ELx_ZEN;
+                       val |= CPACR_EL1_ZEN;
        } else {
                __activate_traps_fpsimd32(vcpu);
        }

@@ -122,13 +122,13 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
         * hypervisor has traps enabled to dispel any illusion of something more
         * complicated taking place.
         */
-       if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, cptr) & BIT(0)))
-               val &= ~CPACR_ELx_FPEN;
-       if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, cptr) & BIT(0)))
-               val &= ~CPACR_ELx_ZEN;
+       if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
+               val &= ~CPACR_EL1_FPEN;
+       if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
+               val &= ~CPACR_EL1_ZEN;
 
        if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
-               val |= cptr & CPACR_ELx_E0POE;
+               val |= cptr & CPACR_EL1_E0POE;
 
        val |= cptr & CPTR_EL2_TCPAC;
@@ -519,6 +519,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 
 static int __init hugetlbpage_init(void)
 {
+       /*
+        * HugeTLB pages are supported on maximum four page table
+        * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
+        * page size, corresponding to hugetlb_add_hstate() calls
+        * here.
+        *
+        * HUGE_MAX_HSTATE should at least match maximum supported
+        * HugeTLB page sizes on the platform. Any new addition to
+        * supported HugeTLB page sizes will also require changing
+        * HUGE_MAX_HSTATE as well.
+        */
+       BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 
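The BUILD_BUG_ON() added above turns a configuration mismatch into a compile failure rather than a boot-time surprise. The same idea in portable C11, with an assumed stand-in constant (the real HUGE_MAX_HSTATE comes from the hugetlb headers):

#include <assert.h>

#define SKETCH_HUGE_MAX_HSTATE 4       /* stand-in for the real config value */

/* Fails the build, not the boot, if too few hstates are available. */
static_assert(SKETCH_HUGE_MAX_HSTATE >= 4,
              "HUGE_MAX_HSTATE must cover PUD, CONT PMD, PMD and CONT PTE");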
@@ -279,7 +279,12 @@ void __init arm64_memblock_init(void)
 
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
-               u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+
+               /*
+                * Use the sanitised version of id_aa64mmfr0_el1 so that linear
+                * map randomization can be enabled by shrinking the IPA space.
+                */
+               u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
                int parange = cpuid_feature_extract_unsigned_field(
                                        mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
                s64 range = linear_region_size -
@@ -48,20 +48,21 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 void __init pgtable_cache_init(void)
 {
+       unsigned int pgd_size = PGD_SIZE;
+
        if (pgdir_is_page_size())
                return;
 
-#ifdef CONFIG_ARM64_PA_BITS_52
        /*
         * With 52-bit physical addresses, the architecture requires the
         * top-level table to be aligned to at least 64 bytes.
         */
-       BUILD_BUG_ON(PGD_SIZE < 64);
-#endif
+       if (PHYS_MASK_SHIFT >= 52)
+               pgd_size = max(pgd_size, 64);
 
        /*
         * Naturally aligned pgds required by the architecture.
         */
-       pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+       pgd_cache = kmem_cache_create("pgd_cache", pgd_size, pgd_size,
                                      SLAB_PANIC, NULL);
 }
@@ -197,10 +197,8 @@ SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
 
        .macro  pte_to_phys, phys, pte
        and     \phys, \pte, #PTE_ADDR_LOW
-#ifdef CONFIG_ARM64_PA_BITS_52
        and     \pte, \pte, #PTE_ADDR_HIGH
        orr     \phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
-#endif
        .endm
 
        .macro  kpti_mk_tbl_ng, type, num_entries

@@ -501,7 +499,7 @@ alternative_else_nop_endif
 #ifdef CONFIG_ARM64_HAFT
        cmp     x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
        b.lt    1f
-       orr     tcr2, tcr2, TCR2_EL1x_HAFT
+       orr     tcr2, tcr2, TCR2_EL1_HAFT
 #endif /* CONFIG_ARM64_HAFT */
 1:
 #endif /* CONFIG_ARM64_HW_AFDBM */

@@ -532,7 +530,8 @@ alternative_else_nop_endif
 #undef PTE_MAYBE_NG
 #undef PTE_MAYBE_SHARED
 
-       orr     tcr2, tcr2, TCR2_EL1x_PIE
+       orr     tcr2, tcr2, TCR2_EL1_PIE
        msr     REG_TCR2_EL1, x0
 
 .Lskip_indirection:
@@ -206,7 +206,7 @@ END {
 
 # Currently this is effectivey a comment, in future we may want to emit
 # defines for the fields.
-/^Fields/ && block_current() == "Sysreg" {
+(/^Fields/ || /^Mapping/) && block_current() == "Sysreg" {
        expect_fields(2)
 
        if (next_bit != 63)
@@ -24,8 +24,16 @@
 # ...
 # EndEnum
 
-# Alternatively if multiple registers share the same layout then
-# a SysregFields block can be used to describe the shared layout
+# For VHE aliases (*_EL12, *_EL02) of system registers, a Mapping
+# entry describes the register the alias actually accesses:
+
+# Sysreg        <name_EL12>     <op0>   <op1>   <crn>   <crm>   <op2>
+# Mapping       <name_EL1>
+# EndSysreg
+
+# Where multiple system regsiters are not VHE aliases but share a
+# common layout, a SysregFields block can be used to describe the
+# shared layout:
 
 # SysregFields  <fieldsname>
 # <field>

@@ -1978,7 +1986,7 @@ Field 1 A
 Field  0       M
 EndSysreg
 
-SysregFields CPACR_ELx
+Sysreg CPACR_EL1       3       0       1       0       2
 Res0   63:30
 Field  29      E0POE
 Field  28      TTA

@@ -1989,10 +1997,6 @@ Field 21:20 FPEN
 Res0   19:18
 Field  17:16   ZEN
 Res0   15:0
-EndSysregFields
-
-Sysreg CPACR_EL1       3       0       1       0       2
-Fields CPACR_ELx
 EndSysreg
 
 Sysreg SMPRI_EL1       3       0       1       2       4

@@ -2947,23 +2951,23 @@ Field 63:0 PhysicalOffset
 EndSysreg
 
 Sysreg CPACR_EL12      3       5       1       0       2
-Fields CPACR_ELx
+Mapping        CPACR_EL1
 EndSysreg
 
 Sysreg ZCR_EL12        3       5       1       2       0
-Fields ZCR_ELx
+Mapping        ZCR_EL1
 EndSysreg
 
 Sysreg SMCR_EL12       3       5       1       2       6
-Fields SMCR_ELx
+Mapping        SMCR_EL1
 EndSysreg
 
 Sysreg GCSCR_EL12      3       5       2       5       0
-Fields GCSCR_ELx
+Mapping        GCSCR_EL1
 EndSysreg
 
 Sysreg GCSPR_EL12      3       5       2       5       1
-Fields GCSPR_ELx
+Mapping        GCSPR_EL1
 EndSysreg
 
 Sysreg FAR_EL12        3       5       6       0       0

@@ -2975,7 +2979,7 @@ Fields MPAM1_ELx
 EndSysreg
 
 Sysreg CONTEXTIDR_EL12 3       5       13      0       1
-Fields CONTEXTIDR_ELx
+Mapping        CONTEXTIDR_EL1
 EndSysreg
 
 SysregFields TTBRx_EL1

@@ -2992,7 +2996,7 @@ Sysreg TTBR1_EL1 3 0 2 0 1
 Fields TTBRx_EL1
 EndSysreg
 
-SysregFields TCR2_EL1x
+Sysreg TCR2_EL1        3       0       2       0       3
 Res0   63:16
 Field  15      DisCH1
 Field  14      DisCH0

@@ -3006,14 +3010,10 @@ Field 3 POE
 Field  2       E0POE
 Field  1       PIE
 Field  0       PnCH
-EndSysregFields
-
-Sysreg TCR2_EL1        3       0       2       0       3
-Fields TCR2_EL1x
 EndSysreg
 
 Sysreg TCR2_EL12       3       5       2       0       3
-Fields TCR2_EL1x
+Mapping        TCR2_EL1
 EndSysreg
 
 Sysreg TCR2_EL2        3       4       2       0       3

@@ -3084,7 +3084,7 @@ Fields PIRx_ELx
 EndSysreg
 
 Sysreg PIRE0_EL12      3       5       10      2       2
-Fields PIRx_ELx
+Mapping        PIRE0_EL1
 EndSysreg
 
 Sysreg PIRE0_EL2       3       4       10      2       2

@@ -3096,7 +3096,7 @@
 EndSysreg
 
 Sysreg PIR_EL12        3       5       10      2       3
-Fields PIRx_ELx
+Mapping        PIR_EL1
 EndSysreg
 
 Sysreg PIR_EL2 3       4       10      2       3

@@ -3116,7 +3116,7 @@ Fields PIRx_ELx
 EndSysreg
 
 Sysreg POR_EL12        3       5       10      2       4
-Fields PIRx_ELx
+Mapping        POR_EL1
 EndSysreg
 
 Sysreg S2POR_EL1       3       0       10      2       5
@@ -141,7 +141,6 @@ LX_CONFIG(CONFIG_ARM64_4K_PAGES)
 LX_CONFIG(CONFIG_ARM64_16K_PAGES)
 LX_CONFIG(CONFIG_ARM64_64K_PAGES)
 if IS_BUILTIN(CONFIG_ARM64):
-    LX_VALUE(CONFIG_ARM64_PA_BITS)
     LX_VALUE(CONFIG_ARM64_VA_BITS)
     LX_VALUE(CONFIG_PAGE_SHIFT)
     LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
@@ -574,12 +574,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7
 
-#ifdef CONFIG_ARM64_PA_BITS_52
 #define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_52
-#else
-#define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_48
-#endif
 
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT   ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN   ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN