Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2024-12-29 09:12:07 +00:00
KVM/arm64 changes for 6.13, part #1
- Support for stage-1 permission indirection (FEAT_S1PIE) and permission
  overlays (FEAT_S1POE), including nested virt + the emulated page table
  walker

- Introduce PSCI SYSTEM_OFF2 support to KVM + client driver. This call was
  introduced in PSCIv1.3 as a mechanism to request hibernation, similar to
  the S4 state in ACPI

- Explicitly trap + hide FEAT_MPAM (QoS controls) from KVM guests. As part
  of it, introduce trivial initialization of the host's MPAM context so KVM
  can use the corresponding traps

- PMU support under nested virtualization, honoring the guest hypervisor's
  trap configuration and event filtering when running a nested guest

- Fixes to vgic ITS serialization where stale device/interrupt table
  entries are not zeroed when the mapping is invalidated by the VM

- Avoid emulated MMIO completion if userspace has requested synchronous
  external abort injection

- Various fixes and cleanups affecting pKVM, vCPU initialization, and
  selftests

-----BEGIN PGP SIGNATURE-----
iI0EABYIADUWIQSNXHjWXuzMZutrKNKivnWIJHzdFgUCZzTZXRccb2xpdmVyLnVw
dG9uQGxpbnV4LmRldgAKCRCivnWIJHzdFioUAP0cs2pYcwuCqLgmeHqfz6L5Xsw3
hKBCNuvr5mjU0hZfLAEA5ml2eUKD7OnssAOmUZ/K/NoCdJFCe8mJWQDlURvr9g4=
=u2/3
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-6.13' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
This commit is contained in:
commit 7b541d557f
@@ -152,6 +152,8 @@ infrastructure:
     +------------------------------+---------+---------+
     | DIT                          | [51-48] |    y    |
     +------------------------------+---------+---------+
     | MPAM                         | [43-40] |    n    |
     +------------------------------+---------+---------+
     | SVE                          | [35-32] |    y    |
     +------------------------------+---------+---------+
     | GIC                          | [27-24] |    n    |
@@ -6857,6 +6857,10 @@ the first `ndata` items (possibly zero) of the data array are valid.
  the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI
  specification.

- for arm64, data[0] is set to KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2
  if the guest issued a SYSTEM_OFF2 call according to v1.3 of the PSCI
  specification.

- for RISC-V, data[0] is set to the value of the second argument of the
  ``sbi_system_reset`` call.

@@ -6890,6 +6894,12 @@ either:
- Deny the guest request to suspend the VM. See ARM DEN0022D.b 5.19.2
  "Caller responsibilities" for possible return values.

Hibernation using the PSCI SYSTEM_OFF2 call is enabled when PSCI v1.3
is enabled. If a guest invokes the PSCI SYSTEM_OFF2 function, KVM will
exit to userspace with the KVM_SYSTEM_EVENT_SHUTDOWN event type and with
data[0] set to KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2. The only
supported hibernate type for the SYSTEM_OFF2 function is HIBERNATE_OFF.

::

		/* KVM_EXIT_IOAPIC_EOI */
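As context for the api.rst hunk above, a VMM could tell a SYSTEM_OFF2-triggered
shutdown (hibernation request) apart from a plain SYSTEM_OFF roughly as in the
sketch below. This is a hypothetical userspace-side illustration, not code from
this series: struct kvm_run, KVM_EXIT_SYSTEM_EVENT, KVM_SYSTEM_EVENT_SHUTDOWN and
KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2 come from <linux/kvm.h>, while
handle_system_event(), request_hibernate() and request_poweroff() are made-up
placeholder names.

/*
 * Hypothetical VMM exit-handler sketch: distinguish a PSCI v1.3
 * SYSTEM_OFF2 (hibernate) shutdown exit from a plain SYSTEM_OFF.
 */
#include <linux/kvm.h>
#include <stdbool.h>

static void request_hibernate(void) { /* snapshot guest state, then exit */ }
static void request_poweroff(void)  { /* tear the VM down */ }

static bool handle_system_event(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT ||
	    run->system_event.type != KVM_SYSTEM_EVENT_SHUTDOWN)
		return false;

	/* data[0] is only meaningful when ndata says it is present */
	if (run->system_event.ndata >= 1 &&
	    (run->system_event.data[0] & KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2))
		request_hibernate();	/* guest called PSCI v1.3 SYSTEM_OFF2 */
	else
		request_poweroff();	/* plain SYSTEM_OFF or other shutdown */

	return true;
}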
@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
	u64		reg_revidr;
	u64		reg_gmid;
	u64		reg_smidr;
	u64		reg_mpamidr;

	u64		reg_id_aa64dfr0;
	u64		reg_id_aa64dfr1;
@@ -60,6 +60,11 @@ cpucap_is_possible(const unsigned int cap)
		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
	case ARM64_WORKAROUND_SPECULATIVE_SSBS:
		return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
	case ARM64_MPAM:
		/*
		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
		 */
		return true;
	}

	return true;
@@ -612,6 +612,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1)
	return val > 0;
}

static inline bool id_aa64pfr0_mpam(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

@@ -838,6 +845,16 @@ static inline bool system_supports_poe(void)
		alternative_has_cap_unlikely(ARM64_HAS_S1POE);
}

static __always_inline bool system_supports_mpam(void)
{
	return alternative_has_cap_unlikely(ARM64_MPAM);
}

static __always_inline bool system_supports_mpam_hcr(void)
{
	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
@@ -220,6 +220,19 @@
	msr	spsr_el2, x0
.endm

.macro __init_el2_mpam
	/* Memory Partitioning And Monitoring: disable EL2 traps */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
	cbz	x0, .Lskip_mpam_\@		// skip if no MPAM
	msr_s	SYS_MPAM2_EL2, xzr		// use the default partition
						// and disable lower traps
	mrs_s	x0, SYS_MPAMIDR_EL1
	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
	msr_s	SYS_MPAMHCR_EL2, xzr		// clear TRAP_MPAMIDR_EL1 -> EL2
.Lskip_mpam_\@:
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as

@@ -237,6 +250,7 @@
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
	__init_el2_mpam
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
@@ -103,6 +103,7 @@
#define HCR_HOST_VHE_FLAGS	(HCR_RW | HCR_TGE | HCR_E2H)

#define HCRX_HOST_FLAGS		(HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS	0

/* TCR_EL2 Registers bits */
#define TCR_EL2_DS		(1UL << 32)
@ -311,35 +312,6 @@
|
||||
GENMASK(19, 18) | \
|
||||
GENMASK(15, 0))
|
||||
|
||||
/* Hyp Debug Configuration Register bits */
|
||||
#define MDCR_EL2_E2TB_MASK (UL(0x3))
|
||||
#define MDCR_EL2_E2TB_SHIFT (UL(24))
|
||||
#define MDCR_EL2_HPMFZS (UL(1) << 36)
|
||||
#define MDCR_EL2_HPMFZO (UL(1) << 29)
|
||||
#define MDCR_EL2_MTPME (UL(1) << 28)
|
||||
#define MDCR_EL2_TDCC (UL(1) << 27)
|
||||
#define MDCR_EL2_HLP (UL(1) << 26)
|
||||
#define MDCR_EL2_HCCD (UL(1) << 23)
|
||||
#define MDCR_EL2_TTRF (UL(1) << 19)
|
||||
#define MDCR_EL2_HPMD (UL(1) << 17)
|
||||
#define MDCR_EL2_TPMS (UL(1) << 14)
|
||||
#define MDCR_EL2_E2PB_MASK (UL(0x3))
|
||||
#define MDCR_EL2_E2PB_SHIFT (UL(12))
|
||||
#define MDCR_EL2_TDRA (UL(1) << 11)
|
||||
#define MDCR_EL2_TDOSA (UL(1) << 10)
|
||||
#define MDCR_EL2_TDA (UL(1) << 9)
|
||||
#define MDCR_EL2_TDE (UL(1) << 8)
|
||||
#define MDCR_EL2_HPME (UL(1) << 7)
|
||||
#define MDCR_EL2_TPM (UL(1) << 6)
|
||||
#define MDCR_EL2_TPMCR (UL(1) << 5)
|
||||
#define MDCR_EL2_HPMN_MASK (UL(0x1F))
|
||||
#define MDCR_EL2_RES0 (GENMASK(63, 37) | \
|
||||
GENMASK(35, 30) | \
|
||||
GENMASK(25, 24) | \
|
||||
GENMASK(22, 20) | \
|
||||
BIT(18) | \
|
||||
GENMASK(16, 15))
|
||||
|
||||
/*
|
||||
* FGT register definitions
|
||||
*
|
||||
|
@ -76,7 +76,6 @@ enum __kvm_host_smccc_func {
|
||||
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
|
||||
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
|
||||
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
|
||||
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
|
||||
|
@ -225,6 +225,11 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
|
||||
return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
|
||||
}
|
||||
|
||||
static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* The layout of SPSR for an AArch32 state is different when observed from an
|
||||
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
|
||||
@ -693,4 +698,8 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
|
||||
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
|
||||
}
|
||||
|
||||
static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
|
||||
}
|
||||
#endif /* __ARM64_KVM_EMULATE_H__ */
|
||||
|
@ -74,8 +74,6 @@ enum kvm_mode kvm_get_mode(void);
|
||||
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
|
||||
#endif
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
extern unsigned int __ro_after_init kvm_sve_max_vl;
|
||||
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
|
||||
int __init kvm_arm_init_sve(void);
|
||||
@ -374,7 +372,7 @@ struct kvm_arch {
|
||||
|
||||
u64 ctr_el0;
|
||||
|
||||
/* Masks for VNCR-baked sysregs */
|
||||
/* Masks for VNCR-backed and general EL2 sysregs */
|
||||
struct kvm_sysreg_masks *sysreg_masks;
|
||||
|
||||
/*
|
||||
@ -408,6 +406,9 @@ struct kvm_vcpu_fault_info {
|
||||
r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
|
||||
__after_##r = __MAX__(__before_##r - 1, r)
|
||||
|
||||
#define MARKER(m) \
|
||||
m, __after_##m = m - 1
|
||||
|
||||
enum vcpu_sysreg {
|
||||
__INVALID_SYSREG__, /* 0 is reserved as an invalid value */
|
||||
MPIDR_EL1, /* MultiProcessor Affinity Register */
|
||||
@ -468,13 +469,15 @@ enum vcpu_sysreg {
|
||||
/* EL2 registers */
|
||||
SCTLR_EL2, /* System Control Register (EL2) */
|
||||
ACTLR_EL2, /* Auxiliary Control Register (EL2) */
|
||||
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
|
||||
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
|
||||
HACR_EL2, /* Hypervisor Auxiliary Control Register */
|
||||
ZCR_EL2, /* SVE Control Register (EL2) */
|
||||
TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
|
||||
TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
|
||||
TCR_EL2, /* Translation Control Register (EL2) */
|
||||
PIRE0_EL2, /* Permission Indirection Register 0 (EL2) */
|
||||
PIR_EL2, /* Permission Indirection Register 1 (EL2) */
|
||||
POR_EL2, /* Permission Overlay Register 2 (EL2) */
|
||||
SPSR_EL2, /* EL2 saved program status register */
|
||||
ELR_EL2, /* EL2 exception link register */
|
||||
AFSR0_EL2, /* Auxiliary Fault Status Register 0 (EL2) */
|
||||
@ -494,7 +497,13 @@ enum vcpu_sysreg {
|
||||
CNTHV_CTL_EL2,
|
||||
CNTHV_CVAL_EL2,
|
||||
|
||||
__VNCR_START__, /* Any VNCR-capable reg goes after this point */
|
||||
/* Anything from this can be RES0/RES1 sanitised */
|
||||
MARKER(__SANITISED_REG_START__),
|
||||
TCR2_EL2, /* Extended Translation Control Register (EL2) */
|
||||
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
|
||||
|
||||
/* Any VNCR-capable reg goes after this point */
|
||||
MARKER(__VNCR_START__),
|
||||
|
||||
VNCR(SCTLR_EL1),/* System Control Register */
|
||||
VNCR(ACTLR_EL1),/* Auxiliary Control Register */
|
||||
@ -554,7 +563,7 @@ struct kvm_sysreg_masks {
|
||||
struct {
|
||||
u64 res0;
|
||||
u64 res1;
|
||||
} mask[NR_SYS_REGS - __VNCR_START__];
|
||||
} mask[NR_SYS_REGS - __SANITISED_REG_START__];
|
||||
};
|
||||
|
||||
struct kvm_cpu_context {
|
||||
@ -1002,13 +1011,13 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
|
||||
|
||||
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
|
||||
|
||||
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
|
||||
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
|
||||
#define __vcpu_sys_reg(v,r) \
|
||||
(*({ \
|
||||
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
|
||||
u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
|
||||
if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
|
||||
*__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
|
||||
if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
|
||||
*__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
|
||||
__r; \
|
||||
}))
|
||||
|
||||
@ -1037,6 +1046,10 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
|
||||
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
|
||||
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
|
||||
case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
|
||||
case TCR2_EL1: *val = read_sysreg_s(SYS_TCR2_EL12); break;
|
||||
case PIR_EL1: *val = read_sysreg_s(SYS_PIR_EL12); break;
|
||||
case PIRE0_EL1: *val = read_sysreg_s(SYS_PIRE0_EL12); break;
|
||||
case POR_EL1: *val = read_sysreg_s(SYS_POR_EL12); break;
|
||||
case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
|
||||
case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
|
||||
case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
|
||||
@ -1083,6 +1096,10 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
|
||||
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
|
||||
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
|
||||
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
|
||||
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
|
||||
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
|
||||
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
|
||||
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
|
||||
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
|
||||
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
|
||||
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
|
||||
@ -1503,4 +1520,13 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
|
||||
(system_supports_fpmr() && \
|
||||
kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
|
||||
|
||||
#define kvm_has_tcr2(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP))
|
||||
|
||||
#define kvm_has_s1pie(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
|
||||
#define kvm_has_s1poe(k) \
|
||||
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
@ -542,18 +542,6 @@
|
||||
|
||||
#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0)
|
||||
#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0)
|
||||
#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0)
|
||||
#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1)
|
||||
#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0)
|
||||
#define __SYS__MPAMVPMx_EL2(x) sys_reg(3, 4, 10, 6, x)
|
||||
#define SYS_MPAMVPM0_EL2 __SYS__MPAMVPMx_EL2(0)
|
||||
#define SYS_MPAMVPM1_EL2 __SYS__MPAMVPMx_EL2(1)
|
||||
#define SYS_MPAMVPM2_EL2 __SYS__MPAMVPMx_EL2(2)
|
||||
#define SYS_MPAMVPM3_EL2 __SYS__MPAMVPMx_EL2(3)
|
||||
#define SYS_MPAMVPM4_EL2 __SYS__MPAMVPMx_EL2(4)
|
||||
#define SYS_MPAMVPM5_EL2 __SYS__MPAMVPMx_EL2(5)
|
||||
#define SYS_MPAMVPM6_EL2 __SYS__MPAMVPMx_EL2(6)
|
||||
#define SYS_MPAMVPM7_EL2 __SYS__MPAMVPMx_EL2(7)
|
||||
|
||||
#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0)
|
||||
#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1)
|
||||
|
@ -50,7 +50,6 @@
|
||||
#define VNCR_VBAR_EL1 0x250
|
||||
#define VNCR_TCR2_EL1 0x270
|
||||
#define VNCR_PIRE0_EL1 0x290
|
||||
#define VNCR_PIRE0_EL2 0x298
|
||||
#define VNCR_PIR_EL1 0x2A0
|
||||
#define VNCR_POR_EL1 0x2A8
|
||||
#define VNCR_ICH_LR0_EL2 0x400
|
||||
|
@@ -484,6 +484,12 @@ enum {
 */
#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2		(1ULL << 0)

/*
 * Shutdown caused by a PSCI v1.3 SYSTEM_OFF2 call.
 * Valid only when the system event has a type of KVM_SYSTEM_EVENT_SHUTDOWN.
 */
#define KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2	(1ULL << 0)

/* run->fail_entry.hardware_entry_failure_reason codes. */
#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED		(1ULL << 0)
@ -684,6 +684,14 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_mpamidr[] = {
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
|
||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
/*
|
||||
* Common ftr bits for a 32bit register with all hidden, strict
|
||||
* attributes, with 4bit feature fields and a default safe value of
|
||||
@ -804,6 +812,9 @@ static const struct __ftr_reg_entry {
|
||||
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
|
||||
ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
|
||||
|
||||
/* Op1 = 0, CRn = 10, CRm = 4 */
|
||||
ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),
|
||||
|
||||
/* Op1 = 1, CRn = 0, CRm = 0 */
|
||||
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
|
||||
|
||||
@ -1163,6 +1174,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
|
||||
cpacr_restore(cpacr);
|
||||
}
|
||||
|
||||
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
|
||||
init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
|
||||
|
||||
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
|
||||
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
|
||||
}
|
||||
@ -1419,6 +1433,11 @@ void update_cpu_features(int cpu,
|
||||
cpacr_restore(cpacr);
|
||||
}
|
||||
|
||||
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
|
||||
taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
|
||||
info->reg_mpamidr, boot->reg_mpamidr);
|
||||
}
|
||||
|
||||
/*
|
||||
* The kernel uses the LDGM/STGM instructions and the number of tags
|
||||
* they read/write depends on the GMID_EL1.BS field. Check that the
|
||||
@@ -2377,6 +2396,36 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
}

static bool
test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
{
	if (!has_cpuid_feature(entry, scope))
		return false;

	/* Check firmware actually enabled MPAM on this cpu. */
	return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN);
}

static void
cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
{
	/*
	 * Access by the kernel (at EL1) should use the reserved PARTID
	 * which is configured unrestricted. This avoids priority-inversion
	 * where latency sensitive tasks have to wait for a task that has
	 * been throttled to release the lock.
	 */
	write_sysreg_s(0, SYS_MPAM1_EL1);
}

static bool
test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);

	return idr & MPAMIDR_EL1_HAS_HCR;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
{
|
||||
.capability = ARM64_ALWAYS_BOOT,
|
||||
@ -2873,6 +2922,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
#endif
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.desc = "Memory Partitioning And Monitoring",
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.capability = ARM64_MPAM,
|
||||
.matches = test_has_mpam,
|
||||
.cpu_enable = cpu_enable_mpam,
|
||||
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1)
|
||||
},
|
||||
{
|
||||
.desc = "Memory Partitioning And Monitoring Virtualisation",
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.capability = ARM64_MPAM_HCR,
|
||||
.matches = test_has_mpam_hcr,
|
||||
},
|
||||
{
|
||||
.desc = "NV1",
|
||||
.capability = ARM64_HAS_HCR_NV1,
|
||||
@ -3396,6 +3459,36 @@ static void verify_hyp_capabilities(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void verify_mpam_capabilities(void)
|
||||
{
|
||||
u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1);
|
||||
u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
|
||||
u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max;
|
||||
|
||||
if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) !=
|
||||
FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) {
|
||||
pr_crit("CPU%d: MPAM version mismatch\n", smp_processor_id());
|
||||
cpu_die_early();
|
||||
}
|
||||
|
||||
cpu_idr = read_cpuid(MPAMIDR_EL1);
|
||||
sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
|
||||
if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) !=
|
||||
FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) {
|
||||
pr_crit("CPU%d: Missing MPAM HCR\n", smp_processor_id());
|
||||
cpu_die_early();
|
||||
}
|
||||
|
||||
cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr);
|
||||
cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr);
|
||||
sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr);
|
||||
sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr);
|
||||
if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) {
|
||||
pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n", smp_processor_id());
|
||||
cpu_die_early();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Run through the enabled system capabilities and enable() it on this CPU.
|
||||
* The capabilities were decided based on the available CPUs at the boot time.
|
||||
@ -3422,6 +3515,9 @@ static void verify_local_cpu_capabilities(void)
|
||||
|
||||
if (is_hyp_mode_available())
|
||||
verify_hyp_capabilities();
|
||||
|
||||
if (system_supports_mpam())
|
||||
verify_mpam_capabilities();
|
||||
}
|
||||
|
||||
void check_local_cpu_capabilities(void)
|
||||
|
@ -478,6 +478,9 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
||||
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
|
||||
__cpuinfo_store_cpu_32bit(&info->aarch32);
|
||||
|
||||
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
|
||||
info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
|
||||
|
||||
cpuinfo_detect_icache_policy(info);
|
||||
}
|
||||
|
||||
|
@ -206,8 +206,7 @@ void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
|
||||
|
||||
static inline bool userspace_irqchip(struct kvm *kvm)
|
||||
{
|
||||
return static_branch_unlikely(&userspace_irqchip_in_use) &&
|
||||
unlikely(!irqchip_in_kernel(kvm));
|
||||
return unlikely(!irqchip_in_kernel(kvm));
|
||||
}
|
||||
|
||||
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
|
||||
|
@ -69,7 +69,6 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
|
||||
static bool vgic_present, kvm_arm_initialised;
|
||||
|
||||
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
|
||||
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
bool is_kvm_arm_initialised(void)
|
||||
{
|
||||
@ -503,9 +502,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
|
||||
|
||||
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
|
||||
static_branch_dec(&userspace_irqchip_in_use);
|
||||
|
||||
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
|
||||
kvm_timer_vcpu_terminate(vcpu);
|
||||
kvm_pmu_vcpu_destroy(vcpu);
|
||||
@ -848,22 +844,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!irqchip_in_kernel(kvm)) {
|
||||
/*
|
||||
* Tell the rest of the code that there are userspace irqchip
|
||||
* VMs in the wild.
|
||||
*/
|
||||
static_branch_inc(&userspace_irqchip_in_use);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize traps for protected VMs.
|
||||
* NOTE: Move to run in EL2 directly, rather than via a hypercall, once
|
||||
* the code is in place for first run initialization at EL2.
|
||||
*/
|
||||
if (kvm_vm_is_protected(kvm))
|
||||
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
@ -1077,7 +1057,7 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
|
||||
* state gets updated in kvm_timer_update_run and
|
||||
* kvm_pmu_update_run below).
|
||||
*/
|
||||
if (static_branch_unlikely(&userspace_irqchip_in_use)) {
|
||||
if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
|
||||
if (kvm_timer_should_notify_user(vcpu) ||
|
||||
kvm_pmu_should_notify_user(vcpu)) {
|
||||
*ret = -EINTR;
|
||||
@ -1199,7 +1179,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
vcpu->mode = OUTSIDE_GUEST_MODE;
|
||||
isb(); /* Ensure work in x_flush_hwstate is committed */
|
||||
kvm_pmu_sync_hwstate(vcpu);
|
||||
if (static_branch_unlikely(&userspace_irqchip_in_use))
|
||||
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
|
||||
kvm_timer_sync_user(vcpu);
|
||||
kvm_vgic_sync_hwstate(vcpu);
|
||||
local_irq_enable();
|
||||
@ -1245,7 +1225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
* we don't want vtimer interrupts to race with syncing the
|
||||
* timer virtual interrupt state.
|
||||
*/
|
||||
if (static_branch_unlikely(&userspace_irqchip_in_use))
|
||||
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
|
||||
kvm_timer_sync_user(vcpu);
|
||||
|
||||
kvm_arch_vcpu_ctxsync_fp(vcpu);
|
||||
|
@ -24,6 +24,9 @@ struct s1_walk_info {
|
||||
unsigned int txsz;
|
||||
int sl;
|
||||
bool hpd;
|
||||
bool e0poe;
|
||||
bool poe;
|
||||
bool pan;
|
||||
bool be;
|
||||
bool s2;
|
||||
};
|
||||
@ -37,6 +40,16 @@ struct s1_walk_result {
|
||||
u8 APTable;
|
||||
bool UXNTable;
|
||||
bool PXNTable;
|
||||
bool uwxn;
|
||||
bool uov;
|
||||
bool ur;
|
||||
bool uw;
|
||||
bool ux;
|
||||
bool pwxn;
|
||||
bool pov;
|
||||
bool pr;
|
||||
bool pw;
|
||||
bool px;
|
||||
};
|
||||
struct {
|
||||
u8 fst;
|
||||
@ -87,6 +100,51 @@ static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 o
|
||||
}
|
||||
}
|
||||
|
||||
static bool s1pie_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
|
||||
{
|
||||
if (!kvm_has_s1pie(vcpu->kvm))
|
||||
return false;
|
||||
|
||||
switch (regime) {
|
||||
case TR_EL2:
|
||||
case TR_EL20:
|
||||
return vcpu_read_sys_reg(vcpu, TCR2_EL2) & TCR2_EL2_PIE;
|
||||
case TR_EL10:
|
||||
return (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TCR2En) &&
|
||||
(__vcpu_sys_reg(vcpu, TCR2_EL1) & TCR2_EL1x_PIE);
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
if (!kvm_has_s1poe(vcpu->kvm)) {
|
||||
wi->poe = wi->e0poe = false;
|
||||
return;
|
||||
}
|
||||
|
||||
switch (wi->regime) {
|
||||
case TR_EL2:
|
||||
case TR_EL20:
|
||||
val = vcpu_read_sys_reg(vcpu, TCR2_EL2);
|
||||
wi->poe = val & TCR2_EL2_POE;
|
||||
wi->e0poe = (wi->regime == TR_EL20) && (val & TCR2_EL2_E0POE);
|
||||
break;
|
||||
case TR_EL10:
|
||||
if (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TCR2En) {
|
||||
wi->poe = wi->e0poe = false;
|
||||
return;
|
||||
}
|
||||
|
||||
val = __vcpu_sys_reg(vcpu, TCR2_EL1);
|
||||
wi->poe = val & TCR2_EL1x_POE;
|
||||
wi->e0poe = val & TCR2_EL1x_E0POE;
|
||||
}
|
||||
}
|
||||
|
||||
static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr, u64 va)
|
||||
{
|
||||
@ -98,6 +156,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
|
||||
|
||||
wi->regime = compute_translation_regime(vcpu, op);
|
||||
as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W);
|
||||
wi->pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) &&
|
||||
(*vcpu_cpsr(vcpu) & PSR_PAN_BIT);
|
||||
|
||||
va55 = va & BIT(55);
|
||||
|
||||
@ -180,6 +240,14 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, u32 op, struct s1_walk_info *wi,
|
||||
(va55 ?
|
||||
FIELD_GET(TCR_HPD1, tcr) :
|
||||
FIELD_GET(TCR_HPD0, tcr)));
|
||||
/* R_JHSVW */
|
||||
wi->hpd |= s1pie_enabled(vcpu, wi->regime);
|
||||
|
||||
/* Do we have POE? */
|
||||
compute_s1poe(vcpu, wi);
|
||||
|
||||
/* R_BVXDG */
|
||||
wi->hpd |= (wi->poe || wi->e0poe);
|
||||
|
||||
/* Someone was silly enough to encode TG0/TG1 differently */
|
||||
if (va55) {
|
||||
@ -412,6 +480,11 @@ struct mmu_config {
|
||||
u64 ttbr1;
|
||||
u64 tcr;
|
||||
u64 mair;
|
||||
u64 tcr2;
|
||||
u64 pir;
|
||||
u64 pire0;
|
||||
u64 por_el0;
|
||||
u64 por_el1;
|
||||
u64 sctlr;
|
||||
u64 vttbr;
|
||||
u64 vtcr;
|
||||
@ -424,6 +497,17 @@ static void __mmu_config_save(struct mmu_config *config)
|
||||
config->ttbr1 = read_sysreg_el1(SYS_TTBR1);
|
||||
config->tcr = read_sysreg_el1(SYS_TCR);
|
||||
config->mair = read_sysreg_el1(SYS_MAIR);
|
||||
if (cpus_have_final_cap(ARM64_HAS_TCR2)) {
|
||||
config->tcr2 = read_sysreg_el1(SYS_TCR2);
|
||||
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
|
||||
config->pir = read_sysreg_el1(SYS_PIR);
|
||||
config->pire0 = read_sysreg_el1(SYS_PIRE0);
|
||||
}
|
||||
if (system_supports_poe()) {
|
||||
config->por_el1 = read_sysreg_el1(SYS_POR);
|
||||
config->por_el0 = read_sysreg_s(SYS_POR_EL0);
|
||||
}
|
||||
}
|
||||
config->sctlr = read_sysreg_el1(SYS_SCTLR);
|
||||
config->vttbr = read_sysreg(vttbr_el2);
|
||||
config->vtcr = read_sysreg(vtcr_el2);
|
||||
@ -444,6 +528,17 @@ static void __mmu_config_restore(struct mmu_config *config)
|
||||
write_sysreg_el1(config->ttbr1, SYS_TTBR1);
|
||||
write_sysreg_el1(config->tcr, SYS_TCR);
|
||||
write_sysreg_el1(config->mair, SYS_MAIR);
|
||||
if (cpus_have_final_cap(ARM64_HAS_TCR2)) {
|
||||
write_sysreg_el1(config->tcr2, SYS_TCR2);
|
||||
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
|
||||
write_sysreg_el1(config->pir, SYS_PIR);
|
||||
write_sysreg_el1(config->pire0, SYS_PIRE0);
|
||||
}
|
||||
if (system_supports_poe()) {
|
||||
write_sysreg_el1(config->por_el1, SYS_POR);
|
||||
write_sysreg_s(config->por_el0, SYS_POR_EL0);
|
||||
}
|
||||
}
|
||||
write_sysreg_el1(config->sctlr, SYS_SCTLR);
|
||||
write_sysreg(config->vttbr, vttbr_el2);
|
||||
write_sysreg(config->vtcr, vtcr_el2);
|
||||
@ -739,6 +834,9 @@ static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
|
||||
if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
|
||||
return false;
|
||||
|
||||
if (s1pie_enabled(vcpu, regime))
|
||||
return true;
|
||||
|
||||
if (regime == TR_EL10)
|
||||
sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
|
||||
else
|
||||
@ -747,11 +845,307 @@ static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
|
||||
return sctlr & SCTLR_EL1_EPAN;
|
||||
}
|
||||
|
||||
static void compute_s1_direct_permissions(struct kvm_vcpu *vcpu,
|
||||
struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr)
|
||||
{
|
||||
bool wxn;
|
||||
|
||||
/* Non-hierarchical part of AArch64.S1DirectBasePermissions() */
|
||||
if (wi->regime != TR_EL2) {
|
||||
switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr->desc)) {
|
||||
case 0b00:
|
||||
wr->pr = wr->pw = true;
|
||||
wr->ur = wr->uw = false;
|
||||
break;
|
||||
case 0b01:
|
||||
wr->pr = wr->pw = wr->ur = wr->uw = true;
|
||||
break;
|
||||
case 0b10:
|
||||
wr->pr = true;
|
||||
wr->pw = wr->ur = wr->uw = false;
|
||||
break;
|
||||
case 0b11:
|
||||
wr->pr = wr->ur = true;
|
||||
wr->pw = wr->uw = false;
|
||||
break;
|
||||
}
|
||||
|
||||
/* We don't use px for anything yet, but hey... */
|
||||
wr->px = !((wr->desc & PTE_PXN) || wr->uw);
|
||||
wr->ux = !(wr->desc & PTE_UXN);
|
||||
} else {
|
||||
wr->ur = wr->uw = wr->ux = false;
|
||||
|
||||
if (!(wr->desc & PTE_RDONLY)) {
|
||||
wr->pr = wr->pw = true;
|
||||
} else {
|
||||
wr->pr = true;
|
||||
wr->pw = false;
|
||||
}
|
||||
|
||||
/* XN maps to UXN */
|
||||
wr->px = !(wr->desc & PTE_UXN);
|
||||
}
|
||||
|
||||
switch (wi->regime) {
|
||||
case TR_EL2:
|
||||
case TR_EL20:
|
||||
wxn = (vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_WXN);
|
||||
break;
|
||||
case TR_EL10:
|
||||
wxn = (__vcpu_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_WXN);
|
||||
break;
|
||||
}
|
||||
|
||||
wr->pwxn = wr->uwxn = wxn;
|
||||
wr->pov = wi->poe;
|
||||
wr->uov = wi->e0poe;
|
||||
}
|
||||
|
||||
static void compute_s1_hierarchical_permissions(struct kvm_vcpu *vcpu,
|
||||
struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr)
|
||||
{
|
||||
/* Hierarchical part of AArch64.S1DirectBasePermissions() */
|
||||
if (wi->regime != TR_EL2) {
|
||||
switch (wr->APTable) {
|
||||
case 0b00:
|
||||
break;
|
||||
case 0b01:
|
||||
wr->ur = wr->uw = false;
|
||||
break;
|
||||
case 0b10:
|
||||
wr->pw = wr->uw = false;
|
||||
break;
|
||||
case 0b11:
|
||||
wr->pw = wr->ur = wr->uw = false;
|
||||
break;
|
||||
}
|
||||
|
||||
wr->px &= !wr->PXNTable;
|
||||
wr->ux &= !wr->UXNTable;
|
||||
} else {
|
||||
if (wr->APTable & BIT(1))
|
||||
wr->pw = false;
|
||||
|
||||
/* XN maps to UXN */
|
||||
wr->px &= !wr->UXNTable;
|
||||
}
|
||||
}
|
||||
|
||||
#define perm_idx(v, r, i) ((vcpu_read_sys_reg((v), (r)) >> ((i) * 4)) & 0xf)
|
||||
|
||||
#define set_priv_perms(wr, r, w, x) \
|
||||
do { \
|
||||
(wr)->pr = (r); \
|
||||
(wr)->pw = (w); \
|
||||
(wr)->px = (x); \
|
||||
} while (0)
|
||||
|
||||
#define set_unpriv_perms(wr, r, w, x) \
|
||||
do { \
|
||||
(wr)->ur = (r); \
|
||||
(wr)->uw = (w); \
|
||||
(wr)->ux = (x); \
|
||||
} while (0)
|
||||
|
||||
#define set_priv_wxn(wr, v) \
|
||||
do { \
|
||||
(wr)->pwxn = (v); \
|
||||
} while (0)
|
||||
|
||||
#define set_unpriv_wxn(wr, v) \
|
||||
do { \
|
||||
(wr)->uwxn = (v); \
|
||||
} while (0)
|
||||
|
||||
/* Similar to AArch64.S1IndirectBasePermissions(), without GCS */
|
||||
#define set_perms(w, wr, ip) \
|
||||
do { \
|
||||
/* R_LLZDZ */ \
|
||||
switch ((ip)) { \
|
||||
case 0b0000: \
|
||||
set_ ## w ## _perms((wr), false, false, false); \
|
||||
break; \
|
||||
case 0b0001: \
|
||||
set_ ## w ## _perms((wr), true , false, false); \
|
||||
break; \
|
||||
case 0b0010: \
|
||||
set_ ## w ## _perms((wr), false, false, true ); \
|
||||
break; \
|
||||
case 0b0011: \
|
||||
set_ ## w ## _perms((wr), true , false, true ); \
|
||||
break; \
|
||||
case 0b0100: \
|
||||
set_ ## w ## _perms((wr), false, false, false); \
|
||||
break; \
|
||||
case 0b0101: \
|
||||
set_ ## w ## _perms((wr), true , true , false); \
|
||||
break; \
|
||||
case 0b0110: \
|
||||
set_ ## w ## _perms((wr), true , true , true ); \
|
||||
break; \
|
||||
case 0b0111: \
|
||||
set_ ## w ## _perms((wr), true , true , true ); \
|
||||
break; \
|
||||
case 0b1000: \
|
||||
set_ ## w ## _perms((wr), true , false, false); \
|
||||
break; \
|
||||
case 0b1001: \
|
||||
set_ ## w ## _perms((wr), true , false, false); \
|
||||
break; \
|
||||
case 0b1010: \
|
||||
set_ ## w ## _perms((wr), true , false, true ); \
|
||||
break; \
|
||||
case 0b1011: \
|
||||
set_ ## w ## _perms((wr), false, false, false); \
|
||||
break; \
|
||||
case 0b1100: \
|
||||
set_ ## w ## _perms((wr), true , true , false); \
|
||||
break; \
|
||||
case 0b1101: \
|
||||
set_ ## w ## _perms((wr), false, false, false); \
|
||||
break; \
|
||||
case 0b1110: \
|
||||
set_ ## w ## _perms((wr), true , true , true ); \
|
||||
break; \
|
||||
case 0b1111: \
|
||||
set_ ## w ## _perms((wr), false, false, false); \
|
||||
break; \
|
||||
} \
|
||||
\
|
||||
/* R_HJYGR */ \
|
||||
set_ ## w ## _wxn((wr), ((ip) == 0b0110)); \
|
||||
\
|
||||
} while (0)
|
||||
|
||||
static void compute_s1_indirect_permissions(struct kvm_vcpu *vcpu,
|
||||
struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr)
|
||||
{
|
||||
u8 up, pp, idx;
|
||||
|
||||
idx = pte_pi_index(wr->desc);
|
||||
|
||||
switch (wi->regime) {
|
||||
case TR_EL10:
|
||||
pp = perm_idx(vcpu, PIR_EL1, idx);
|
||||
up = perm_idx(vcpu, PIRE0_EL1, idx);
|
||||
break;
|
||||
case TR_EL20:
|
||||
pp = perm_idx(vcpu, PIR_EL2, idx);
|
||||
up = perm_idx(vcpu, PIRE0_EL2, idx);
|
||||
break;
|
||||
case TR_EL2:
|
||||
pp = perm_idx(vcpu, PIR_EL2, idx);
|
||||
up = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
set_perms(priv, wr, pp);
|
||||
|
||||
if (wi->regime != TR_EL2)
|
||||
set_perms(unpriv, wr, up);
|
||||
else
|
||||
set_unpriv_perms(wr, false, false, false);
|
||||
|
||||
wr->pov = wi->poe && !(pp & BIT(3));
|
||||
wr->uov = wi->e0poe && !(up & BIT(3));
|
||||
|
||||
/* R_VFPJF */
|
||||
if (wr->px && wr->uw) {
|
||||
set_priv_perms(wr, false, false, false);
|
||||
set_unpriv_perms(wr, false, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
static void compute_s1_overlay_permissions(struct kvm_vcpu *vcpu,
|
||||
struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr)
|
||||
{
|
||||
u8 idx, pov_perms, uov_perms;
|
||||
|
||||
idx = FIELD_GET(PTE_PO_IDX_MASK, wr->desc);
|
||||
|
||||
switch (wi->regime) {
|
||||
case TR_EL10:
|
||||
pov_perms = perm_idx(vcpu, POR_EL1, idx);
|
||||
uov_perms = perm_idx(vcpu, POR_EL0, idx);
|
||||
break;
|
||||
case TR_EL20:
|
||||
pov_perms = perm_idx(vcpu, POR_EL2, idx);
|
||||
uov_perms = perm_idx(vcpu, POR_EL0, idx);
|
||||
break;
|
||||
case TR_EL2:
|
||||
pov_perms = perm_idx(vcpu, POR_EL2, idx);
|
||||
uov_perms = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pov_perms & ~POE_RXW)
|
||||
pov_perms = POE_NONE;
|
||||
|
||||
if (wi->poe && wr->pov) {
|
||||
wr->pr &= pov_perms & POE_R;
|
||||
wr->px &= pov_perms & POE_X;
|
||||
wr->pw &= pov_perms & POE_W;
|
||||
}
|
||||
|
||||
if (uov_perms & ~POE_RXW)
|
||||
uov_perms = POE_NONE;
|
||||
|
||||
if (wi->e0poe && wr->uov) {
|
||||
wr->ur &= uov_perms & POE_R;
|
||||
wr->ux &= uov_perms & POE_X;
|
||||
wr->uw &= uov_perms & POE_W;
|
||||
}
|
||||
}
|
||||
|
||||
static void compute_s1_permissions(struct kvm_vcpu *vcpu,
|
||||
struct s1_walk_info *wi,
|
||||
struct s1_walk_result *wr)
|
||||
{
|
||||
bool pan;
|
||||
|
||||
if (!s1pie_enabled(vcpu, wi->regime))
|
||||
compute_s1_direct_permissions(vcpu, wi, wr);
|
||||
else
|
||||
compute_s1_indirect_permissions(vcpu, wi, wr);
|
||||
|
||||
if (!wi->hpd)
|
||||
compute_s1_hierarchical_permissions(vcpu, wi, wr);
|
||||
|
||||
if (wi->poe || wi->e0poe)
|
||||
compute_s1_overlay_permissions(vcpu, wi, wr);
|
||||
|
||||
/* R_QXXPC */
|
||||
if (wr->pwxn) {
|
||||
if (!wr->pov && wr->pw)
|
||||
wr->px = false;
|
||||
if (wr->pov && wr->px)
|
||||
wr->pw = false;
|
||||
}
|
||||
|
||||
/* R_NPBXC */
|
||||
if (wr->uwxn) {
|
||||
if (!wr->uov && wr->uw)
|
||||
wr->ux = false;
|
||||
if (wr->uov && wr->ux)
|
||||
wr->uw = false;
|
||||
}
|
||||
|
||||
pan = wi->pan && (wr->ur || wr->uw ||
|
||||
(pan3_enabled(vcpu, wi->regime) && wr->ux));
|
||||
wr->pw &= !pan;
|
||||
wr->pr &= !pan;
|
||||
}
|
||||
|
||||
static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
|
||||
{
|
||||
bool perm_fail, ur, uw, ux, pr, pw, px;
|
||||
struct s1_walk_result wr = {};
|
||||
struct s1_walk_info wi = {};
|
||||
bool perm_fail = false;
|
||||
int ret, idx;
|
||||
|
||||
ret = setup_s1_walk(vcpu, op, &wi, &wr, vaddr);
|
||||
@ -770,88 +1164,24 @@ static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
|
||||
if (ret)
|
||||
goto compute_par;
|
||||
|
||||
/* FIXME: revisit when adding indirect permission support */
|
||||
/* AArch64.S1DirectBasePermissions() */
|
||||
if (wi.regime != TR_EL2) {
|
||||
switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr.desc)) {
|
||||
case 0b00:
|
||||
pr = pw = true;
|
||||
ur = uw = false;
|
||||
break;
|
||||
case 0b01:
|
||||
pr = pw = ur = uw = true;
|
||||
break;
|
||||
case 0b10:
|
||||
pr = true;
|
||||
pw = ur = uw = false;
|
||||
break;
|
||||
case 0b11:
|
||||
pr = ur = true;
|
||||
pw = uw = false;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (wr.APTable) {
|
||||
case 0b00:
|
||||
break;
|
||||
case 0b01:
|
||||
ur = uw = false;
|
||||
break;
|
||||
case 0b10:
|
||||
pw = uw = false;
|
||||
break;
|
||||
case 0b11:
|
||||
pw = ur = uw = false;
|
||||
break;
|
||||
}
|
||||
|
||||
/* We don't use px for anything yet, but hey... */
|
||||
px = !((wr.desc & PTE_PXN) || wr.PXNTable || uw);
|
||||
ux = !((wr.desc & PTE_UXN) || wr.UXNTable);
|
||||
|
||||
if (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) {
|
||||
bool pan;
|
||||
|
||||
pan = *vcpu_cpsr(vcpu) & PSR_PAN_BIT;
|
||||
pan &= ur || uw || (pan3_enabled(vcpu, wi.regime) && ux);
|
||||
pw &= !pan;
|
||||
pr &= !pan;
|
||||
}
|
||||
} else {
|
||||
ur = uw = ux = false;
|
||||
|
||||
if (!(wr.desc & PTE_RDONLY)) {
|
||||
pr = pw = true;
|
||||
} else {
|
||||
pr = true;
|
||||
pw = false;
|
||||
}
|
||||
|
||||
if (wr.APTable & BIT(1))
|
||||
pw = false;
|
||||
|
||||
/* XN maps to UXN */
|
||||
px = !((wr.desc & PTE_UXN) || wr.UXNTable);
|
||||
}
|
||||
|
||||
perm_fail = false;
|
||||
compute_s1_permissions(vcpu, &wi, &wr);
|
||||
|
||||
switch (op) {
|
||||
case OP_AT_S1E1RP:
|
||||
case OP_AT_S1E1R:
|
||||
case OP_AT_S1E2R:
|
||||
perm_fail = !pr;
|
||||
perm_fail = !wr.pr;
|
||||
break;
|
||||
case OP_AT_S1E1WP:
|
||||
case OP_AT_S1E1W:
|
||||
case OP_AT_S1E2W:
|
||||
perm_fail = !pw;
|
||||
perm_fail = !wr.pw;
|
||||
break;
|
||||
case OP_AT_S1E0R:
|
||||
perm_fail = !ur;
|
||||
perm_fail = !wr.ur;
|
||||
break;
|
||||
case OP_AT_S1E0W:
|
||||
perm_fail = !uw;
|
||||
perm_fail = !wr.uw;
|
||||
break;
|
||||
case OP_AT_S1E1A:
|
||||
case OP_AT_S1E2A:
|
||||
@ -914,6 +1244,17 @@ static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1), SYS_TTBR1);
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1), SYS_TCR);
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1), SYS_MAIR);
|
||||
if (kvm_has_tcr2(vcpu->kvm)) {
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR2_EL1), SYS_TCR2);
|
||||
if (kvm_has_s1pie(vcpu->kvm)) {
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, PIR_EL1), SYS_PIR);
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, PIRE0_EL1), SYS_PIRE0);
|
||||
}
|
||||
if (kvm_has_s1poe(vcpu->kvm)) {
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, POR_EL1), SYS_POR);
|
||||
write_sysreg_s(vcpu_read_sys_reg(vcpu, POR_EL0), SYS_POR_EL0);
|
||||
}
|
||||
}
|
||||
write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR);
|
||||
__load_stage2(mmu, mmu->arch);
|
||||
|
||||
@ -992,12 +1333,9 @@ void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
|
||||
* switching context behind everybody's back, disable interrupts...
|
||||
*/
|
||||
scoped_guard(write_lock_irqsave, &vcpu->kvm->mmu_lock) {
|
||||
struct kvm_s2_mmu *mmu;
|
||||
u64 val, hcr;
|
||||
bool fail;
|
||||
|
||||
mmu = &vcpu->kvm->arch.mmu;
|
||||
|
||||
val = hcr = read_sysreg(hcr_el2);
|
||||
val &= ~HCR_TGE;
|
||||
val |= HCR_VM;
|
||||
|
@@ -16,9 +16,13 @@

enum trap_behaviour {
	BEHAVE_HANDLE_LOCALLY		= 0,

	BEHAVE_FORWARD_READ		= BIT(0),
	BEHAVE_FORWARD_WRITE		= BIT(1),
	BEHAVE_FORWARD_ANY		= BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
	BEHAVE_FORWARD_RW		= BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,

	/* Traps that take effect in Host EL0, this is rare! */
	BEHAVE_FORWARD_IN_HOST_EL0	= BIT(2),
};
|
||||
|
||||
struct trap_bits {
|
||||
@ -79,7 +83,6 @@ enum cgt_group_id {
|
||||
CGT_MDCR_E2TB,
|
||||
CGT_MDCR_TDCC,
|
||||
|
||||
CGT_CPACR_E0POE,
|
||||
CGT_CPTR_TAM,
|
||||
CGT_CPTR_TCPAC,
|
||||
|
||||
@ -106,6 +109,7 @@ enum cgt_group_id {
|
||||
CGT_HCR_TPU_TOCU,
|
||||
CGT_HCR_NV1_nNV2_ENSCXT,
|
||||
CGT_MDCR_TPM_TPMCR,
|
||||
CGT_MDCR_TPM_HPMN,
|
||||
CGT_MDCR_TDE_TDA,
|
||||
CGT_MDCR_TDE_TDOSA,
|
||||
CGT_MDCR_TDE_TDRA,
|
||||
@ -122,6 +126,7 @@ enum cgt_group_id {
|
||||
CGT_CNTHCTL_EL1PTEN,
|
||||
|
||||
CGT_CPTR_TTA,
|
||||
CGT_MDCR_HPMN,
|
||||
|
||||
/* Must be last */
|
||||
__NR_CGT_GROUP_IDS__
|
||||
@ -138,7 +143,7 @@ static const struct trap_bits coarse_trap_bits[] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TID2,
|
||||
.mask = HCR_TID2,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TID3] = {
|
||||
.index = HCR_EL2,
|
||||
@ -162,37 +167,37 @@ static const struct trap_bits coarse_trap_bits[] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TIDCP,
|
||||
.mask = HCR_TIDCP,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TACR] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TACR,
|
||||
.mask = HCR_TACR,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TSW] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TSW,
|
||||
.mask = HCR_TSW,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TPC,
|
||||
.mask = HCR_TPC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TPU] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TPU,
|
||||
.mask = HCR_TPU,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TTLB] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TTLB,
|
||||
.mask = HCR_TTLB,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TVM] = {
|
||||
.index = HCR_EL2,
|
||||
@ -204,7 +209,7 @@ static const struct trap_bits coarse_trap_bits[] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TDZ,
|
||||
.mask = HCR_TDZ,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TRVM] = {
|
||||
.index = HCR_EL2,
|
||||
@ -216,205 +221,201 @@ static const struct trap_bits coarse_trap_bits[] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TLOR,
|
||||
.mask = HCR_TLOR,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TERR] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TERR,
|
||||
.mask = HCR_TERR,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_APK] = {
|
||||
.index = HCR_EL2,
|
||||
.value = 0,
|
||||
.mask = HCR_APK,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_NV] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_NV,
|
||||
.mask = HCR_NV,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_NV_nNV2] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_NV,
|
||||
.mask = HCR_NV | HCR_NV2,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_NV1_nNV2] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_NV | HCR_NV1,
|
||||
.mask = HCR_NV | HCR_NV1 | HCR_NV2,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_AT] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_AT,
|
||||
.mask = HCR_AT,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_nFIEN] = {
|
||||
.index = HCR_EL2,
|
||||
.value = 0,
|
||||
.mask = HCR_FIEN,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TID4] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TID4,
|
||||
.mask = HCR_TID4,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TICAB] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TICAB,
|
||||
.mask = HCR_TICAB,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TOCU] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TOCU,
|
||||
.mask = HCR_TOCU,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_ENSCXT] = {
|
||||
.index = HCR_EL2,
|
||||
.value = 0,
|
||||
.mask = HCR_ENSCXT,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TTLBIS] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TTLBIS,
|
||||
.mask = HCR_TTLBIS,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCR_TTLBOS] = {
|
||||
.index = HCR_EL2,
|
||||
.value = HCR_TTLBOS,
|
||||
.mask = HCR_TTLBOS,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TPMCR] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TPMCR,
|
||||
.mask = MDCR_EL2_TPMCR,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW |
|
||||
BEHAVE_FORWARD_IN_HOST_EL0,
|
||||
},
|
||||
[CGT_MDCR_TPM] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TPM,
|
||||
.mask = MDCR_EL2_TPM,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW |
|
||||
BEHAVE_FORWARD_IN_HOST_EL0,
|
||||
},
|
||||
[CGT_MDCR_TDE] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TDE,
|
||||
.mask = MDCR_EL2_TDE,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TDA] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TDA,
|
||||
.mask = MDCR_EL2_TDA,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TDOSA] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TDOSA,
|
||||
.mask = MDCR_EL2_TDOSA,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TDRA] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TDRA,
|
||||
.mask = MDCR_EL2_TDRA,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_E2PB] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = 0,
|
||||
.mask = BIT(MDCR_EL2_E2PB_SHIFT),
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TPMS] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TPMS,
|
||||
.mask = MDCR_EL2_TPMS,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TTRF] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TTRF,
|
||||
.mask = MDCR_EL2_TTRF,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_E2TB] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = 0,
|
||||
.mask = BIT(MDCR_EL2_E2TB_SHIFT),
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_MDCR_TDCC] = {
|
||||
.index = MDCR_EL2,
|
||||
.value = MDCR_EL2_TDCC,
|
||||
.mask = MDCR_EL2_TDCC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
},
|
||||
[CGT_CPACR_E0POE] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPACR_ELx_E0POE,
|
||||
.mask = CPACR_ELx_E0POE,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_CPTR_TAM] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPTR_EL2_TAM,
|
||||
.mask = CPTR_EL2_TAM,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_CPTR_TCPAC] = {
|
||||
.index = CPTR_EL2,
|
||||
.value = CPTR_EL2_TCPAC,
|
||||
.mask = CPTR_EL2_TCPAC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCRX_EnFPM] = {
|
||||
.index = HCRX_EL2,
|
||||
.value = 0,
|
||||
.mask = HCRX_EL2_EnFPM,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_HCRX_TCR2En] = {
|
||||
.index = HCRX_EL2,
|
||||
.value = 0,
|
||||
.mask = HCRX_EL2_TCR2En,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_ICH_HCR_TC] = {
|
||||
.index = ICH_HCR_EL2,
|
||||
.value = ICH_HCR_TC,
|
||||
.mask = ICH_HCR_TC,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_ICH_HCR_TALL0] = {
|
||||
.index = ICH_HCR_EL2,
|
||||
.value = ICH_HCR_TALL0,
|
||||
.mask = ICH_HCR_TALL0,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_ICH_HCR_TALL1] = {
|
||||
.index = ICH_HCR_EL2,
|
||||
.value = ICH_HCR_TALL1,
|
||||
.mask = ICH_HCR_TALL1,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
[CGT_ICH_HCR_TDIR] = {
|
||||
.index = ICH_HCR_EL2,
|
||||
.value = ICH_HCR_TDIR,
|
||||
.mask = ICH_HCR_TDIR,
|
||||
.behaviour = BEHAVE_FORWARD_ANY,
|
||||
.behaviour = BEHAVE_FORWARD_RW,
|
||||
},
|
||||
};
|
||||
|
||||
@ -435,6 +436,7 @@ static const enum cgt_group_id *coarse_control_combo[] = {
|
||||
MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
|
||||
MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
|
||||
MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR),
|
||||
MCB(CGT_MDCR_TPM_HPMN, CGT_MDCR_TPM, CGT_MDCR_HPMN),
|
||||
MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA),
|
||||
MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
|
||||
MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
|
||||
@ -474,7 +476,7 @@ static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
|
||||
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
|
||||
return BEHAVE_FORWARD_ANY;
|
||||
return BEHAVE_FORWARD_RW;
|
||||
}
|
||||
|
||||
static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
|
||||
@ -482,7 +484,7 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
|
||||
if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
|
||||
return BEHAVE_FORWARD_ANY;
|
||||
return BEHAVE_FORWARD_RW;
|
||||
}
|
||||
|
||||
static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
|
||||
@ -493,7 +495,35 @@ static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
|
||||
val = translate_cptr_el2_to_cpacr_el1(val);
|
||||
|
||||
if (val & CPACR_ELx_TTA)
|
||||
return BEHAVE_FORWARD_ANY;
|
||||
return BEHAVE_FORWARD_RW;
|
||||
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
}
|
||||
|
||||
static enum trap_behaviour check_mdcr_hpmn(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
|
||||
unsigned int idx;
|
||||
|
||||
|
||||
switch (sysreg) {
|
||||
case SYS_PMEVTYPERn_EL0(0) ... SYS_PMEVTYPERn_EL0(30):
|
||||
case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
|
||||
idx = (sys_reg_CRm(sysreg) & 0x3) << 3 | sys_reg_Op2(sysreg);
|
||||
break;
|
||||
case SYS_PMXEVTYPER_EL0:
|
||||
case SYS_PMXEVCNTR_EL0:
|
||||
idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
|
||||
__vcpu_sys_reg(vcpu, PMSELR_EL0));
|
||||
break;
|
||||
default:
|
||||
/* Someone used this trap helper for something else... */
|
||||
KVM_BUG_ON(1, vcpu->kvm);
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
}
|
||||
|
||||
if (kvm_pmu_counter_is_hyp(vcpu, idx))
|
||||
return BEHAVE_FORWARD_RW | BEHAVE_FORWARD_IN_HOST_EL0;
|
||||
|
||||
return BEHAVE_HANDLE_LOCALLY;
|
||||
}
|
||||
@ -505,6 +535,7 @@ static const complex_condition_check ccc[] = {
|
||||
CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
|
||||
CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
|
||||
CCC(CGT_CPTR_TTA, check_cptr_tta),
|
||||
CCC(CGT_MDCR_HPMN, check_mdcr_hpmn),
|
||||
};
|
||||
|
||||
/*
|
||||
@ -711,6 +742,10 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
|
||||
SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_PIR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_PIRE0_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_POR_EL0, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_POR_EL1, CGT_HCR_TVM_TRVM),
|
||||
SR_TRAP(SYS_TCR2_EL1, CGT_HCR_TVM_TRVM_HCRX_TCR2En),
|
||||
SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ),
|
||||
SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ),
|
||||
@ -919,77 +954,77 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
|
||||
SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM_HPMN),
|
||||
SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM),
|
||||
SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA),
|
||||
SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA),
|
||||
@ -1141,7 +1176,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_AMEVTYPER1_EL0(13), CGT_CPTR_TAM),
SR_TRAP(SYS_AMEVTYPER1_EL0(14), CGT_CPTR_TAM),
SR_TRAP(SYS_AMEVTYPER1_EL0(15), CGT_CPTR_TAM),
SR_TRAP(SYS_POR_EL0, CGT_CPACR_E0POE),
/* op0=2, op1=1, and CRn<0b1000 */
SR_RANGE_TRAP(sys_reg(2, 1, 0, 0, 0),
sys_reg(2, 1, 7, 15, 7), CGT_CPTR_TTA),
@ -2021,7 +2055,8 @@ int __init populate_nv_trap_config(void)
cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];

for (int i = 0; cgids[i] != __RESERVED__; i++) {
if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
if (cgids[i] >= __MULTIPLE_CONTROL_BITS__ &&
cgids[i] < __COMPLEX_CONDITIONS__) {
kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
ret = -EINVAL;
}
@ -2126,11 +2161,19 @@ static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
return masks->mask[sr - __VNCR_START__].res0;
}

static bool check_fgt_bit(struct kvm *kvm, bool is_read,
static bool check_fgt_bit(struct kvm_vcpu *vcpu, bool is_read,
u64 val, const union trap_config tc)
{
struct kvm *kvm = vcpu->kvm;
enum vcpu_sysreg sr;

/*
* KVM doesn't know about any FGTs that apply to the host, and hopefully
* that'll remain the case.
*/
if (is_hyp_ctxt(vcpu))
return false;

if (tc.pol)
return (val & BIT(tc.bit));

@ -2207,7 +2250,15 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
* If we're not nesting, immediately return to the caller, with the
* sysreg index, should we have it.
*/
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
if (!vcpu_has_nv(vcpu))
goto local;

/*
* There are a few traps that take effect InHost, but are constrained
* to EL0. Don't bother with computing the trap behaviour if the vCPU
* isn't in EL0.
*/
if (is_hyp_ctxt(vcpu) && !vcpu_is_host_el0(vcpu))
goto local;

switch ((enum fgt_group_id)tc.fgt) {
@ -2253,12 +2304,14 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
goto local;
}

if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
val, tc))
if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu, is_read, val, tc))
goto inject;

b = compute_trap_behaviour(vcpu, tc);

if (!(b & BEHAVE_FORWARD_IN_HOST_EL0) && vcpu_is_host_el0(vcpu))
goto local;

if (((b & BEHAVE_FORWARD_READ) && is_read) ||
((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject;
@ -2393,6 +2446,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)

kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();

kvm_pmu_nested_transition(vcpu);
}

static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@ -2475,6 +2530,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();

kvm_pmu_nested_transition(vcpu);

return 1;
}

@ -204,6 +204,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}

static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;

if (!system_supports_mpam())
return;

/* trap guest access to MPAMIDR_EL1 */
if (system_supports_mpam_hcr()) {
write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
} else {
/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
r |= MPAM2_EL2_TIDR;
}

write_sysreg_s(r, SYS_MPAM2_EL2);
}

static inline void __deactivate_traps_mpam(void)
{
if (!system_supports_mpam())
return;

write_sysreg_s(0, SYS_MPAM2_EL2);

if (system_supports_mpam_hcr())
write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@ -244,6 +273,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
}

__activate_traps_hfgxtr(vcpu);
__activate_traps_mpam(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@ -263,6 +293,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);

__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
@ -58,7 +58,7 @@ static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
|
||||
return false;
|
||||
|
||||
vcpu = ctxt_to_vcpu(ctxt);
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
|
||||
return kvm_has_s1pie(kern_hyp_va(vcpu->kvm));
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
|
||||
@ -69,7 +69,7 @@ static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
|
||||
return false;
|
||||
|
||||
vcpu = ctxt_to_vcpu(ctxt);
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);
|
||||
return kvm_has_tcr2(kern_hyp_va(vcpu->kvm));
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
|
||||
@ -80,7 +80,7 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
|
||||
return false;
|
||||
|
||||
vcpu = ctxt_to_vcpu(ctxt);
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);
|
||||
return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
|
||||
}
|
||||
|
||||
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
@ -152,9 +152,10 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
|
||||
write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
|
||||
}
|
||||
|
||||
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
|
||||
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
|
||||
u64 mpidr)
|
||||
{
|
||||
write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);
|
||||
write_sysreg(mpidr, vmpidr_el2);
|
||||
|
||||
if (has_vhe() ||
|
||||
!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
|
||||
|
@ -15,6 +15,4 @@
#define DECLARE_REG(type, name, ctxt, reg) \
type name = (type)cpu_reg(ctxt, (reg))

void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */
|
@ -105,8 +105,10 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)

hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;

hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
(HCR_TWI | HCR_TWE);

hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;

@ -349,13 +351,6 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
@ -411,7 +406,6 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
HANDLE_FUNC(__pkvm_init_vm),
HANDLE_FUNC(__pkvm_init_vcpu),
HANDLE_FUNC(__pkvm_teardown_vm),
|
@ -6,6 +6,9 @@
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
#include <nvhe/fixed_config.h>
|
||||
#include <nvhe/mem_protect.h>
|
||||
#include <nvhe/memory.h>
|
||||
@ -201,11 +204,46 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
|
||||
if (has_hvhe())
|
||||
vcpu->arch.hcr_el2 |= HCR_E2H;
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
|
||||
/* route synchronous external abort exceptions to EL2 */
|
||||
vcpu->arch.hcr_el2 |= HCR_TEA;
|
||||
/* trap error record accesses */
|
||||
vcpu->arch.hcr_el2 |= HCR_TERR;
|
||||
}
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
|
||||
vcpu->arch.hcr_el2 |= HCR_FWB;
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
|
||||
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
|
||||
vcpu->arch.hcr_el2 |= HCR_TID4;
|
||||
else
|
||||
vcpu->arch.hcr_el2 |= HCR_TID2;
|
||||
|
||||
if (vcpu_has_ptrauth(vcpu))
|
||||
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize trap register values in protected mode.
|
||||
*/
|
||||
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
|
||||
static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
|
||||
vcpu->arch.mdcr_el2 = 0;
|
||||
|
||||
pkvm_vcpu_reset_hcr(vcpu);
|
||||
|
||||
if ((!vcpu_is_protected(vcpu)))
|
||||
return;
|
||||
|
||||
pvm_init_trap_regs(vcpu);
|
||||
pvm_init_traps_aa64pfr0(vcpu);
|
||||
pvm_init_traps_aa64pfr1(vcpu);
|
||||
@ -289,6 +327,65 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
hyp_spin_unlock(&vm_table_lock);
|
||||
}
|
||||
|
||||
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
|
||||
{
|
||||
struct kvm *kvm = &hyp_vm->kvm;
|
||||
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
|
||||
|
||||
/* No restrictions for non-protected VMs. */
|
||||
if (!kvm_vm_is_protected(kvm)) {
|
||||
bitmap_copy(kvm->arch.vcpu_features,
|
||||
host_kvm->arch.vcpu_features,
|
||||
KVM_VCPU_MAX_FEATURES);
|
||||
return;
|
||||
}
|
||||
|
||||
bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);
|
||||
|
||||
/*
|
||||
* For protected VMs, always allow:
|
||||
* - CPU starting in poweroff state
|
||||
* - PSCI v0.2
|
||||
*/
|
||||
set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features);
|
||||
set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);
|
||||
|
||||
/*
|
||||
* Check if remaining features are allowed:
|
||||
* - Performance Monitoring
|
||||
* - Scalable Vectors
|
||||
* - Pointer Authentication
|
||||
*/
|
||||
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW))
|
||||
set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);
|
||||
|
||||
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW))
|
||||
set_bit(KVM_ARM_VCPU_SVE, allowed_features);
|
||||
|
||||
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) &&
|
||||
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED))
|
||||
set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);
|
||||
|
||||
if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) &&
|
||||
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW))
|
||||
set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);
|
||||
|
||||
bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
|
||||
allowed_features, KVM_VCPU_MAX_FEATURES);
|
||||
}
|
||||
|
||||
static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
|
||||
|
||||
if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
|
||||
vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) {
|
||||
kvm_vcpu_enable_ptrauth(vcpu);
|
||||
} else {
|
||||
vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH);
|
||||
}
|
||||
}
|
||||
|
||||
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
|
||||
{
|
||||
if (host_vcpu)
|
||||
@ -310,6 +407,18 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
|
||||
hyp_vm->host_kvm = host_kvm;
|
||||
hyp_vm->kvm.created_vcpus = nr_vcpus;
|
||||
hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
|
||||
hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
|
||||
pkvm_init_features_from_host(hyp_vm, host_kvm);
|
||||
}
|
||||
|
||||
static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
|
||||
|
||||
if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
|
||||
vcpu_clear_flag(vcpu, GUEST_HAS_SVE);
|
||||
vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
|
||||
}
|
||||
}
|
||||
|
||||
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
|
||||
@ -335,6 +444,11 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
|
||||
|
||||
hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
|
||||
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
|
||||
hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
|
||||
|
||||
pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
|
||||
pkvm_vcpu_init_ptrauth(hyp_vcpu);
|
||||
pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
|
||||
done:
|
||||
if (ret)
|
||||
unpin_host_vcpu(host_vcpu);
|
||||
|
@ -265,6 +265,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
case PSCI_1_0_FN_PSCI_FEATURES:
case PSCI_1_0_FN_SET_SUSPEND_MODE:
case PSCI_1_1_FN64_SYSTEM_RESET2:
case PSCI_1_3_FN_SYSTEM_OFF2:
case PSCI_1_3_FN64_SYSTEM_OFF2:
return psci_forward(host_ctxt);
case PSCI_1_0_FN64_SYSTEM_SUSPEND:
return psci_system_suspend(func_id, host_ctxt);
@ -95,7 +95,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
|
||||
{
|
||||
void *start, *end, *virt = hyp_phys_to_virt(phys);
|
||||
unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
|
||||
enum kvm_pgtable_prot prot;
|
||||
int ret, i;
|
||||
|
||||
/* Recreate the hyp page-table using the early page allocator */
|
||||
@ -147,24 +146,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
|
||||
return ret;
|
||||
}
|
||||
|
||||
pkvm_create_host_sve_mappings();
|
||||
|
||||
/*
|
||||
* Map the host sections RO in the hypervisor, but transfer the
|
||||
* ownership from the host to the hypervisor itself to make sure they
|
||||
* can't be donated or shared with another entity.
|
||||
*
|
||||
* The ownership transition requires matching changes in the host
|
||||
* stage-2. This will be done later (see finalize_host_mappings()) once
|
||||
* the hyp_vmemmap is addressable.
|
||||
*/
|
||||
prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
|
||||
ret = pkvm_create_mappings(&kvm_vgic_global_state,
|
||||
&kvm_vgic_global_state + 1, prot);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
return pkvm_create_host_sve_mappings();
|
||||
}
|
||||
|
||||
static void update_nvhe_init_params(void)
|
||||
|
@ -28,7 +28,7 @@ void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)

void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_el1_state(ctxt);
__sysreg_restore_el1_state(ctxt, ctxt_sys_reg(ctxt, MPIDR_EL1));
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
@ -1012,9 +1012,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
|
||||
val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
|
||||
/* IDbits */
|
||||
val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
|
||||
/* SEIS */
|
||||
if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
|
||||
val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
|
||||
/* A3V */
|
||||
val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
|
||||
/* EOImode */
|
||||
|
@ -15,6 +15,131 @@
|
||||
#include <asm/kvm_hyp.h>
|
||||
#include <asm/kvm_nested.h>
|
||||
|
||||
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* These registers are common with EL1 */
|
||||
__vcpu_sys_reg(vcpu, PAR_EL1) = read_sysreg(par_el1);
|
||||
__vcpu_sys_reg(vcpu, TPIDR_EL1) = read_sysreg(tpidr_el1);
|
||||
|
||||
__vcpu_sys_reg(vcpu, ESR_EL2) = read_sysreg_el1(SYS_ESR);
|
||||
__vcpu_sys_reg(vcpu, AFSR0_EL2) = read_sysreg_el1(SYS_AFSR0);
|
||||
__vcpu_sys_reg(vcpu, AFSR1_EL2) = read_sysreg_el1(SYS_AFSR1);
|
||||
__vcpu_sys_reg(vcpu, FAR_EL2) = read_sysreg_el1(SYS_FAR);
|
||||
__vcpu_sys_reg(vcpu, MAIR_EL2) = read_sysreg_el1(SYS_MAIR);
|
||||
__vcpu_sys_reg(vcpu, VBAR_EL2) = read_sysreg_el1(SYS_VBAR);
|
||||
__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2) = read_sysreg_el1(SYS_CONTEXTIDR);
|
||||
__vcpu_sys_reg(vcpu, AMAIR_EL2) = read_sysreg_el1(SYS_AMAIR);
|
||||
|
||||
/*
|
||||
* In VHE mode those registers are compatible between EL1 and EL2,
|
||||
* and the guest uses the _EL1 versions on the CPU naturally.
|
||||
* So we save them into their _EL2 versions here.
|
||||
* For nVHE mode we trap accesses to those registers, so our
|
||||
* _EL2 copy in sys_regs[] is always up-to-date and we don't need
|
||||
* to save anything here.
|
||||
*/
|
||||
if (vcpu_el2_e2h_is_set(vcpu)) {
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* We don't save CPTR_EL2, as accesses to CPACR_EL1
|
||||
* are always trapped, ensuring that the in-memory
|
||||
* copy is always up-to-date. A small blessing...
|
||||
*/
|
||||
__vcpu_sys_reg(vcpu, SCTLR_EL2) = read_sysreg_el1(SYS_SCTLR);
|
||||
__vcpu_sys_reg(vcpu, TTBR0_EL2) = read_sysreg_el1(SYS_TTBR0);
|
||||
__vcpu_sys_reg(vcpu, TTBR1_EL2) = read_sysreg_el1(SYS_TTBR1);
|
||||
__vcpu_sys_reg(vcpu, TCR_EL2) = read_sysreg_el1(SYS_TCR);
|
||||
|
||||
if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
|
||||
__vcpu_sys_reg(vcpu, TCR2_EL2) = read_sysreg_el1(SYS_TCR2);
|
||||
|
||||
if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
|
||||
__vcpu_sys_reg(vcpu, PIRE0_EL2) = read_sysreg_el1(SYS_PIRE0);
|
||||
__vcpu_sys_reg(vcpu, PIR_EL2) = read_sysreg_el1(SYS_PIR);
|
||||
}
|
||||
|
||||
if (ctxt_has_s1poe(&vcpu->arch.ctxt))
|
||||
__vcpu_sys_reg(vcpu, POR_EL2) = read_sysreg_el1(SYS_POR);
|
||||
}
|
||||
|
||||
/*
|
||||
* The EL1 view of CNTKCTL_EL1 has a bunch of RES0 bits where
|
||||
* the interesting CNTHCTL_EL2 bits live. So preserve these
|
||||
* bits when reading back the guest-visible value.
|
||||
*/
|
||||
val = read_sysreg_el1(SYS_CNTKCTL);
|
||||
val &= CNTKCTL_VALID_BITS;
|
||||
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
|
||||
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
|
||||
}
|
||||
|
||||
__vcpu_sys_reg(vcpu, SP_EL2) = read_sysreg(sp_el1);
|
||||
__vcpu_sys_reg(vcpu, ELR_EL2) = read_sysreg_el1(SYS_ELR);
|
||||
__vcpu_sys_reg(vcpu, SPSR_EL2) = read_sysreg_el1(SYS_SPSR);
|
||||
}
|
||||
|
||||
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
/* These registers are common with EL1 */
|
||||
write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
|
||||
write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);
|
||||
|
||||
write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
|
||||
|
||||
if (vcpu_el2_e2h_is_set(vcpu)) {
|
||||
/*
|
||||
* In VHE mode those registers are compatible between
|
||||
* EL1 and EL2.
|
||||
*/
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2), SYS_SCTLR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, CPTR_EL2), SYS_CPACR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2), SYS_TTBR0);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR1_EL2), SYS_TTBR1);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR_EL2), SYS_TCR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, CNTHCTL_EL2), SYS_CNTKCTL);
|
||||
} else {
|
||||
/*
|
||||
* CNTHCTL_EL2 only affects EL1 when running nVHE, so
|
||||
* no need to restore it.
|
||||
*/
|
||||
val = translate_sctlr_el2_to_sctlr_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2));
|
||||
write_sysreg_el1(val, SYS_SCTLR);
|
||||
val = translate_cptr_el2_to_cpacr_el1(__vcpu_sys_reg(vcpu, CPTR_EL2));
|
||||
write_sysreg_el1(val, SYS_CPACR);
|
||||
val = translate_ttbr0_el2_to_ttbr0_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2));
|
||||
write_sysreg_el1(val, SYS_TTBR0);
|
||||
val = translate_tcr_el2_to_tcr_el1(__vcpu_sys_reg(vcpu, TCR_EL2));
|
||||
write_sysreg_el1(val, SYS_TCR);
|
||||
}
|
||||
|
||||
if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR2_EL2), SYS_TCR2);
|
||||
|
||||
if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, PIR_EL2), SYS_PIR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, PIRE0_EL2), SYS_PIRE0);
|
||||
}
|
||||
|
||||
if (ctxt_has_s1poe(&vcpu->arch.ctxt))
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, POR_EL2), SYS_POR);
|
||||
}
|
||||
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, ESR_EL2), SYS_ESR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR0_EL2), SYS_AFSR0);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR1_EL2), SYS_AFSR1);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, FAR_EL2), SYS_FAR);
|
||||
write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2), sp_el1);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2), SYS_ELR);
|
||||
write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2), SYS_SPSR);
|
||||
}
|
||||
|
||||
/*
|
||||
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
|
||||
* pstate, which are handled as part of the el2 return state) on every
|
||||
@ -66,6 +191,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
u64 mpidr;
|
||||
|
||||
host_ctxt = host_data_ptr(host_ctxt);
|
||||
__sysreg_save_user_state(host_ctxt);
|
||||
@ -89,7 +215,29 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
__sysreg32_restore_state(vcpu);
|
||||
__sysreg_restore_user_state(guest_ctxt);
|
||||
__sysreg_restore_el1_state(guest_ctxt);
|
||||
|
||||
if (unlikely(__is_hyp_ctxt(guest_ctxt))) {
|
||||
__sysreg_restore_vel2_state(vcpu);
|
||||
} else {
|
||||
if (vcpu_has_nv(vcpu)) {
|
||||
/*
|
||||
* Use the guest hypervisor's VPIDR_EL2 when in a
|
||||
* nested state. The hardware value of MIDR_EL1 gets
|
||||
* restored on put.
|
||||
*/
|
||||
write_sysreg(ctxt_sys_reg(guest_ctxt, VPIDR_EL2), vpidr_el2);
|
||||
|
||||
/*
|
||||
* As we're restoring a nested guest, set the value
|
||||
* provided by the guest hypervisor.
|
||||
*/
|
||||
mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
|
||||
} else {
|
||||
mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
|
||||
}
|
||||
|
||||
__sysreg_restore_el1_state(guest_ctxt, mpidr);
|
||||
}
|
||||
|
||||
vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
|
||||
}
|
||||
@ -112,12 +260,20 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
|
||||
|
||||
host_ctxt = host_data_ptr(host_ctxt);
|
||||
|
||||
__sysreg_save_el1_state(guest_ctxt);
|
||||
if (unlikely(__is_hyp_ctxt(guest_ctxt)))
|
||||
__sysreg_save_vel2_state(vcpu);
|
||||
else
|
||||
__sysreg_save_el1_state(guest_ctxt);
|
||||
|
||||
__sysreg_save_user_state(guest_ctxt);
|
||||
__sysreg32_save_state(vcpu);
|
||||
|
||||
/* Restore host user state */
|
||||
__sysreg_restore_user_state(host_ctxt);
|
||||
|
||||
/* If leaving a nesting guest, restore MIDR_EL1 default view */
|
||||
if (vcpu_has_nv(vcpu))
|
||||
write_sysreg(read_cpuid_id(), vpidr_el2);
|
||||
|
||||
vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
|
||||
}
|
||||
|
@ -575,6 +575,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
case KVM_ARM_PSCI_0_2:
|
||||
case KVM_ARM_PSCI_1_0:
|
||||
case KVM_ARM_PSCI_1_1:
|
||||
case KVM_ARM_PSCI_1_2:
|
||||
case KVM_ARM_PSCI_1_3:
|
||||
if (!wants_02)
|
||||
return -EINVAL;
|
||||
vcpu->kvm->arch.psci_version = val;
|
||||
|
@ -72,6 +72,31 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
|
||||
return data;
|
||||
}
|
||||
|
||||
static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
|
||||
return false;
|
||||
|
||||
if (vcpu_el1_is_32bit(vcpu)) {
|
||||
switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
|
||||
case unpack_vcpu_flag(EXCEPT_AA32_UND):
|
||||
case unpack_vcpu_flag(EXCEPT_AA32_IABT):
|
||||
case unpack_vcpu_flag(EXCEPT_AA32_DABT):
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
|
||||
case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
|
||||
case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
|
||||
* or in-kernel IO emulation
|
||||
@ -84,8 +109,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
|
||||
unsigned int len;
|
||||
int mask;
|
||||
|
||||
/* Detect an already handled MMIO return */
|
||||
if (unlikely(!vcpu->mmio_needed))
|
||||
/*
|
||||
* Detect if the MMIO return was already handled or if userspace aborted
|
||||
* the MMIO access.
|
||||
*/
|
||||
if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu)))
|
||||
return 1;
|
||||
|
||||
vcpu->mmio_needed = 0;
|
||||
|
@ -917,12 +917,13 @@ static void limit_nv_id_regs(struct kvm *kvm)
|
||||
ID_AA64MMFR4_EL1_E2H0_NI_NV1);
|
||||
kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
|
||||
|
||||
/* Only limited support for PMU, Debug, BPs and WPs */
|
||||
/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
|
||||
val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
|
||||
val &= (NV_FTR(DFR0, PMUVer) |
|
||||
NV_FTR(DFR0, WRPs) |
|
||||
NV_FTR(DFR0, BRPs) |
|
||||
NV_FTR(DFR0, DebugVer));
|
||||
NV_FTR(DFR0, DebugVer) |
|
||||
NV_FTR(DFR0, HPMN0));
|
||||
|
||||
/* Cap Debug to ARMv8.1 */
|
||||
tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
|
||||
@ -933,15 +934,15 @@ static void limit_nv_id_regs(struct kvm *kvm)
|
||||
kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
|
||||
}
|
||||
|
||||
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
|
||||
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
|
||||
enum vcpu_sysreg sr, u64 v)
|
||||
{
|
||||
u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
|
||||
struct kvm_sysreg_masks *masks;
|
||||
|
||||
masks = vcpu->kvm->arch.sysreg_masks;
|
||||
|
||||
if (masks) {
|
||||
sr -= __VNCR_START__;
|
||||
sr -= __SANITISED_REG_START__;
|
||||
|
||||
v &= ~masks->mask[sr].res0;
|
||||
v |= masks->mask[sr].res1;
|
||||
@ -952,7 +953,11 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
|
||||
|
||||
static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
|
||||
{
|
||||
int i = sr - __VNCR_START__;
|
||||
int i = sr - __SANITISED_REG_START__;
|
||||
|
||||
BUILD_BUG_ON(!__builtin_constant_p(sr));
|
||||
BUILD_BUG_ON(sr < __SANITISED_REG_START__);
|
||||
BUILD_BUG_ON(sr >= NR_SYS_REGS);
|
||||
|
||||
kvm->arch.sysreg_masks->mask[i].res0 = res0;
|
||||
kvm->arch.sysreg_masks->mask[i].res1 = res1;
|
||||
@ -1050,7 +1055,7 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
|
||||
res0 |= HCRX_EL2_PTTWI;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
|
||||
res0 |= HCRX_EL2_SCTLR2En;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
|
||||
if (!kvm_has_tcr2(kvm))
|
||||
res0 |= HCRX_EL2_TCR2En;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
|
||||
res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
|
||||
@ -1101,9 +1106,9 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
|
||||
res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
|
||||
res0 |= HFGxTR_EL2_nRCWMASK_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
if (!kvm_has_s1pie(kvm))
|
||||
res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
if (!kvm_has_s1poe(kvm))
|
||||
res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
|
||||
res0 |= HFGxTR_EL2_nS2POR_EL1;
|
||||
@ -1200,6 +1205,28 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
|
||||
res0 |= ~(res0 | res1);
|
||||
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
|
||||
|
||||
/* TCR2_EL2 */
|
||||
res0 = TCR2_EL2_RES0;
|
||||
res1 = TCR2_EL2_RES1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
|
||||
res0 |= (TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1 | TCR2_EL2_D128);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, MEC, IMP))
|
||||
res0 |= TCR2_EL2_AMEC1 | TCR2_EL2_AMEC0;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, HAFDBS, HAFT))
|
||||
res0 |= TCR2_EL2_HAFT;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
|
||||
res0 |= TCR2_EL2_PTTWI | TCR2_EL2_PnCH;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
|
||||
res0 |= TCR2_EL2_AIE;
|
||||
if (!kvm_has_s1poe(kvm))
|
||||
res0 |= TCR2_EL2_POE | TCR2_EL2_E0POE;
|
||||
if (!kvm_has_s1pie(kvm))
|
||||
res0 |= TCR2_EL2_PIE;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
|
||||
res0 |= (TCR2_EL2_E0POE | TCR2_EL2_D128 |
|
||||
TCR2_EL2_AMEC1 | TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1);
|
||||
set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
|
||||
|
||||
/* SCTLR_EL1 */
|
||||
res0 = SCTLR_EL1_RES0;
|
||||
res1 = SCTLR_EL1_RES1;
|
||||
@ -1207,6 +1234,43 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
|
||||
res0 |= SCTLR_EL1_EPAN;
|
||||
set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
|
||||
|
||||
/* MDCR_EL2 */
|
||||
res0 = MDCR_EL2_RES0;
|
||||
res1 = MDCR_EL2_RES1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
|
||||
res0 |= (MDCR_EL2_HPMN | MDCR_EL2_TPMCR |
|
||||
MDCR_EL2_TPM | MDCR_EL2_HPME);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
|
||||
res0 |= MDCR_EL2_E2PB | MDCR_EL2_TPMS;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, SPMU, IMP))
|
||||
res0 |= MDCR_EL2_EnSPM;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1))
|
||||
res0 |= MDCR_EL2_HPMD;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
|
||||
res0 |= MDCR_EL2_TTRF;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
|
||||
res0 |= MDCR_EL2_HCCD | MDCR_EL2_HLP;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
|
||||
res0 |= MDCR_EL2_E2TB;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
|
||||
res0 |= MDCR_EL2_TDCC;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, MTPMU, IMP) ||
|
||||
kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
|
||||
res0 |= MDCR_EL2_MTPME;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P7))
|
||||
res0 |= MDCR_EL2_HPMFZO;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSS, IMP))
|
||||
res0 |= MDCR_EL2_PMSSE;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
|
||||
res0 |= MDCR_EL2_HPMFZS;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, EBEP, IMP))
|
||||
res0 |= MDCR_EL2_PMEE;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, V8P9))
|
||||
res0 |= MDCR_EL2_EBWE;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP))
|
||||
res0 |= MDCR_EL2_EnSTEPOP;
|
||||
set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -89,7 +89,11 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
|
||||
|
||||
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
|
||||
{
|
||||
u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
|
||||
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
||||
u64 val = kvm_vcpu_read_pmcr(vcpu);
|
||||
|
||||
if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
|
||||
return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;
|
||||
|
||||
return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
|
||||
(pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
|
||||
@ -111,6 +115,11 @@ static u32 counter_index_to_evtreg(u64 idx)
|
||||
return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
|
||||
}
|
||||
|
||||
static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
|
||||
{
|
||||
return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
|
||||
}
|
||||
|
||||
static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
||||
@ -244,7 +253,7 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
|
||||
int i;
|
||||
|
||||
for_each_set_bit(i, &mask, 32)
|
||||
@ -265,7 +274,37 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
irq_work_sync(&vcpu->arch.pmu.overflow_work);
|
||||
}
|
||||
|
||||
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
unsigned int hpmn;

if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
return false;

/*
* Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
* implemented. Since KVM's ability to emulate HPMN=0 does not directly
* depend on hardware (all PMU registers are trapped), make the
* implementation choice that all counters are included in the second
* range reserved for EL2/EL3.
*/
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
return idx >= hpmn;
}

u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
u64 hpmn;

if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
return mask;

hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
}

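A rough standalone sketch of the resulting counter split (not from the patch; the example values and the GENMASK64 macro are invented here): with MDCR_EL2.HPMN = 6 on an 8-counter PMU, counters 0..5 stay with the guest at EL1/EL0 while counters 6..7 fall in the range kvm_pmu_counter_is_hyp() reserves for the guest hypervisor, which is exactly what kvm_pmu_accessible_counter_mask() masks out (the dedicated cycle counter is never part of this split).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's GENMASK(), bits h..l inclusive. */
#define GENMASK64(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static bool counter_is_hyp(unsigned int idx, unsigned int hpmn)
{
	return idx >= hpmn;	/* same test as kvm_pmu_counter_is_hyp() */
}

int main(void)
{
	unsigned int nr_counters = 8, hpmn = 6;	/* example values only */
	uint64_t mask = GENMASK64(nr_counters - 1, 0) &
			~GENMASK64(nr_counters - 1, hpmn);

	printf("EL1-accessible counter mask: %#llx\n", (unsigned long long)mask);
	for (unsigned int i = 0; i < nr_counters; i++)
		printf("counter %u: %s\n", i,
		       counter_is_hyp(i, hpmn) ? "EL2 range" : "guest range");
	return 0;
}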
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));

@ -574,7 +613,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
|
||||
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
|
||||
|
||||
if (val & ARMV8_PMU_PMCR_P) {
|
||||
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
|
||||
for_each_set_bit(i, &mask, 32)
|
||||
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
|
||||
@ -585,8 +624,44 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
|
||||
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
||||
return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
|
||||
(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
|
||||
unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
|
||||
|
||||
if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
|
||||
return false;
|
||||
|
||||
if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
|
||||
return mdcr & MDCR_EL2_HPME;
|
||||
|
||||
return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
|
||||
}
|
||||
|
||||
static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
|
||||
{
|
||||
u64 evtreg = kvm_pmc_read_evtreg(pmc);
|
||||
bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
|
||||
bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;
|
||||
|
||||
return u == nsu;
|
||||
}
|
||||
|
||||
static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
|
||||
{
|
||||
u64 evtreg = kvm_pmc_read_evtreg(pmc);
|
||||
bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
|
||||
bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;
|
||||
|
||||
return p == nsk;
|
||||
}
|
||||
|
||||
static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
||||
u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
|
||||
|
||||
if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
|
||||
return false;
|
||||
|
||||
return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -599,17 +674,15 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
|
||||
struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
|
||||
struct perf_event *event;
|
||||
struct perf_event_attr attr;
|
||||
u64 eventsel, reg, data;
|
||||
bool p, u, nsk, nsu;
|
||||
u64 eventsel, evtreg;
|
||||
|
||||
reg = counter_index_to_evtreg(pmc->idx);
|
||||
data = __vcpu_sys_reg(vcpu, reg);
|
||||
evtreg = kvm_pmc_read_evtreg(pmc);
|
||||
|
||||
kvm_pmu_stop_counter(pmc);
|
||||
if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
|
||||
eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
|
||||
else
|
||||
eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
|
||||
eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);
|
||||
|
||||
/*
|
||||
* Neither SW increment nor chained events need to be backed
|
||||
@ -627,22 +700,25 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
|
||||
!test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
|
||||
return;
|
||||
|
||||
p = data & ARMV8_PMU_EXCLUDE_EL1;
|
||||
u = data & ARMV8_PMU_EXCLUDE_EL0;
|
||||
nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
|
||||
nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
|
||||
|
||||
memset(&attr, 0, sizeof(struct perf_event_attr));
|
||||
attr.type = arm_pmu->pmu.type;
|
||||
attr.size = sizeof(attr);
|
||||
attr.pinned = 1;
|
||||
attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
|
||||
attr.exclude_user = (u != nsu);
|
||||
attr.exclude_kernel = (p != nsk);
|
||||
attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
|
||||
attr.exclude_hv = 1; /* Don't count EL2 events */
|
||||
attr.exclude_host = 1; /* Don't count host events */
|
||||
attr.config = eventsel;
|
||||
|
||||
/*
|
||||
* Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
|
||||
* guest's EL2 filter.
|
||||
*/
|
||||
if (unlikely(is_hyp_ctxt(vcpu)))
|
||||
attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
|
||||
else
|
||||
attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
|
||||
|
||||
/*
|
||||
* If counting with a 64bit counter, advertise it to the perf
|
||||
* code, carefully dealing with the initial sample period
|
||||
@ -804,7 +880,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
|
||||
|
||||
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
|
||||
|
||||
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
|
||||
|
||||
@ -1139,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
|
||||
|
||||
return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
|
||||
}
|
||||
|
||||
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
bool reprogrammed = false;
|
||||
unsigned long mask;
|
||||
int i;
|
||||
|
||||
if (!kvm_vcpu_has_pmu(vcpu))
|
||||
return;
|
||||
|
||||
mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
|
||||
for_each_set_bit(i, &mask, 32) {
|
||||
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
|
||||
|
||||
/*
|
||||
* We only need to reconfigure events where the filter is
|
||||
* different at EL1 vs. EL2, as we're multiplexing the true EL1
|
||||
* event filter bit for nested.
|
||||
*/
|
||||
if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
|
||||
continue;
|
||||
|
||||
kvm_pmu_create_perf_event(pmc);
|
||||
reprogrammed = true;
|
||||
}
|
||||
|
||||
if (reprogrammed)
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
}
|
||||
|
@ -194,6 +194,12 @@ static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
|
||||
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0);
|
||||
}
|
||||
|
||||
static void kvm_psci_system_off2(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN,
|
||||
KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2);
|
||||
}
|
||||
|
||||
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0);
|
||||
@ -322,7 +328,7 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
|
||||
|
||||
switch(psci_fn) {
|
||||
case PSCI_0_2_FN_PSCI_VERSION:
|
||||
val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
|
||||
val = PSCI_VERSION(1, minor);
|
||||
break;
|
||||
case PSCI_1_0_FN_PSCI_FEATURES:
|
||||
arg = smccc_get_arg1(vcpu);
|
||||
@ -358,6 +364,11 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
|
||||
if (minor >= 1)
|
||||
val = 0;
|
||||
break;
|
||||
case PSCI_1_3_FN_SYSTEM_OFF2:
|
||||
case PSCI_1_3_FN64_SYSTEM_OFF2:
|
||||
if (minor >= 3)
|
||||
val = PSCI_1_3_OFF_TYPE_HIBERNATE_OFF;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case PSCI_1_0_FN_SYSTEM_SUSPEND:
|
||||
@ -392,6 +403,33 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case PSCI_1_3_FN_SYSTEM_OFF2:
|
||||
kvm_psci_narrow_to_32bit(vcpu);
|
||||
fallthrough;
|
||||
case PSCI_1_3_FN64_SYSTEM_OFF2:
|
||||
if (minor < 3)
|
||||
break;
|
||||
|
||||
arg = smccc_get_arg1(vcpu);
|
||||
/*
|
||||
* SYSTEM_OFF2 defaults to HIBERNATE_OFF if arg1 is zero. arg2
|
||||
* must be zero.
|
||||
*/
|
||||
if ((arg && arg != PSCI_1_3_OFF_TYPE_HIBERNATE_OFF) ||
|
||||
smccc_get_arg2(vcpu) != 0) {
|
||||
val = PSCI_RET_INVALID_PARAMS;
|
||||
break;
|
||||
}
|
||||
kvm_psci_system_off2(vcpu);
|
||||
/*
|
||||
* We shouldn't be going back to the guest after receiving a
|
||||
* SYSTEM_OFF2 request. Preload a return value of
|
||||
* INTERNAL_FAILURE should userspace ignore the exit and resume
|
||||
* the vCPU.
|
||||
*/
|
||||
val = PSCI_RET_INTERNAL_FAILURE;
|
||||
ret = 0;
|
||||
break;
|
||||
default:
|
||||
return kvm_psci_0_2_call(vcpu);
|
||||
}
|
||||
@ -449,6 +487,10 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
switch (version) {
|
||||
case KVM_ARM_PSCI_1_3:
|
||||
return kvm_psci_1_x_call(vcpu, 3);
|
||||
case KVM_ARM_PSCI_1_2:
|
||||
return kvm_psci_1_x_call(vcpu, 2);
|
||||
case KVM_ARM_PSCI_1_1:
|
||||
return kvm_psci_1_x_call(vcpu, 1);
|
||||
case KVM_ARM_PSCI_1_0:
|
||||
|
@ -167,11 +167,6 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
}

/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@ -110,6 +110,14 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
|
||||
PURE_EL2_SYSREG( RVBAR_EL2 );
|
||||
PURE_EL2_SYSREG( TPIDR_EL2 );
|
||||
PURE_EL2_SYSREG( HPFAR_EL2 );
|
||||
PURE_EL2_SYSREG( HCRX_EL2 );
|
||||
PURE_EL2_SYSREG( HFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( HFGWTR_EL2 );
|
||||
PURE_EL2_SYSREG( HFGITR_EL2 );
|
||||
PURE_EL2_SYSREG( HDFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( HDFGWTR_EL2 );
|
||||
PURE_EL2_SYSREG( HAFGRTR_EL2 );
|
||||
PURE_EL2_SYSREG( CNTVOFF_EL2 );
|
||||
PURE_EL2_SYSREG( CNTHCTL_EL2 );
|
||||
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
|
||||
translate_sctlr_el2_to_sctlr_el1 );
|
||||
@ -126,10 +134,15 @@ static bool get_el2_to_el1_mapping(unsigned int reg,
|
||||
MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
|
||||
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
@ -148,6 +161,21 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
|
||||
if (!is_hyp_ctxt(vcpu))
|
||||
goto memory_read;
|
||||
|
||||
/*
|
||||
* CNTHCTL_EL2 requires some special treatment to
|
||||
* account for the bits that can be set via CNTKCTL_EL1.
|
||||
*/
|
||||
switch (reg) {
|
||||
case CNTHCTL_EL2:
|
||||
if (vcpu_el2_e2h_is_set(vcpu)) {
|
||||
val = read_sysreg_el1(SYS_CNTKCTL);
|
||||
val &= CNTKCTL_VALID_BITS;
|
||||
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
|
||||
return val;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this register does not have an EL1 counterpart,
|
||||
* then read the stored EL2 version.
|
||||
@ -165,6 +193,9 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
|
||||
|
||||
/* Get the current version of the EL1 counterpart. */
|
||||
WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
|
||||
if (reg >= __SANITISED_REG_START__)
|
||||
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
@ -198,6 +229,19 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
|
||||
*/
|
||||
__vcpu_sys_reg(vcpu, reg) = val;
|
||||
|
||||
switch (reg) {
|
||||
case CNTHCTL_EL2:
|
||||
/*
|
||||
* If E2H=0, CNHTCTL_EL2 is a pure shadow register.
|
||||
* Otherwise, some of the bits are backed by
|
||||
* CNTKCTL_EL1, while the rest is kept in memory.
|
||||
* Yes, this is fun stuff.
|
||||
*/
|
||||
if (vcpu_el2_e2h_is_set(vcpu))
|
||||
write_sysreg_el1(val, SYS_CNTKCTL);
|
||||
return;
|
||||
}
|
||||
|
||||
/* No EL1 counterpart? We're done here. */
if (reg == el1r)
|
||||
return;
|
||||
@ -390,10 +434,6 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
|
||||
bool was_enabled = vcpu_has_cache_enabled(vcpu);
|
||||
u64 val, mask, shift;
|
||||
|
||||
if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
|
||||
!kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
|
||||
return undef_access(vcpu, p, r);
|
||||
|
||||
BUG_ON(!p->is_write);
|
||||
|
||||
get_access_mask(r, &mask, &shift);
|
||||
@ -1128,7 +1168,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
|
||||
{
|
||||
bool set;
|
||||
|
||||
val &= kvm_pmu_valid_counter_mask(vcpu);
|
||||
val &= kvm_pmu_accessible_counter_mask(vcpu);
|
||||
|
||||
switch (r->reg) {
|
||||
case PMOVSSET_EL0:
|
||||
@ -1151,7 +1191,7 @@ static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 va
|
||||
|
||||
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
|
||||
{
|
||||
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
|
||||
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
|
||||
return 0;
|
||||
@ -1165,7 +1205,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
if (pmu_access_el0_disabled(vcpu))
|
||||
return false;
|
||||
|
||||
mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
if (p->is_write) {
|
||||
val = p->regval & mask;
|
||||
if (r->Op2 & 0x1) {
|
||||
@ -1188,7 +1228,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
|
||||
if (check_pmu_access_disabled(vcpu, 0))
|
||||
return false;
|
||||
@ -1212,7 +1252,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
|
||||
if (pmu_access_el0_disabled(vcpu))
|
||||
return false;
|
||||
@ -1242,7 +1282,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
||||
if (pmu_write_swinc_el0_disabled(vcpu))
|
||||
return false;
|
||||
|
||||
mask = kvm_pmu_valid_counter_mask(vcpu);
|
||||
mask = kvm_pmu_accessible_counter_mask(vcpu);
|
||||
kvm_pmu_software_increment(vcpu, p->regval & mask);
|
||||
return true;
|
||||
}
|
||||
@ -1509,6 +1549,9 @@ static u8 pmuver_to_perfmon(u8 pmuver)
|
||||
}
|
||||
}
|
||||
|
||||
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
|
||||
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
|
||||
|
||||
/* Read a sanitised cpufeature ID register by sys_reg_desc */
|
||||
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *r)
|
||||
@ -1522,6 +1565,12 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
|
||||
val = read_sanitised_ftr_reg(id);
|
||||
|
||||
switch (id) {
|
||||
case SYS_ID_AA64DFR0_EL1:
|
||||
val = sanitise_id_aa64dfr0_el1(vcpu, val);
|
||||
break;
|
||||
case SYS_ID_AA64PFR0_EL1:
|
||||
val = sanitise_id_aa64pfr0_el1(vcpu, val);
|
||||
break;
|
||||
case SYS_ID_AA64PFR1_EL1:
|
||||
if (!kvm_has_mte(vcpu->kvm))
|
||||
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
|
||||
@ -1535,6 +1584,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
|
||||
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
|
||||
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
|
||||
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
|
||||
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
|
||||
break;
|
||||
case SYS_ID_AA64PFR2_EL1:
|
||||
/* We only expose FPMR */
|
||||
@ -1692,11 +1742,8 @@ static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
|
||||
{
|
||||
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
|
||||
|
||||
if (!vcpu_has_sve(vcpu))
|
||||
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
|
||||
|
||||
@ -1724,6 +1771,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
|
||||
|
||||
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
|
||||
|
||||
/*
|
||||
* MPAM is disabled by default as KVM also needs a set of PARTID to
|
||||
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
|
||||
* older kernels let the guest see the ID bit.
|
||||
*/
|
||||
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
@ -1737,11 +1791,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
|
||||
(val); \
|
||||
})
|
||||
|
||||
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
|
||||
{
|
||||
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
|
||||
|
||||
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
|
||||
|
||||
/*
|
||||
@ -1834,6 +1885,70 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
|
||||
return set_id_reg(vcpu, rd, val);
|
||||
}
|
||||
|
||||
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd, u64 user_val)
|
||||
{
|
||||
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
|
||||
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
|
||||
|
||||
/*
|
||||
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
|
||||
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
|
||||
* guests, but didn't add trap handling. KVM doesn't support MPAM and
|
||||
* always returns an UNDEF for these registers. The guest must see 0
|
||||
* for this field.
|
||||
*
|
||||
* But KVM must also accept values from user-space that were provided
|
||||
* by KVM. On CPUs that support MPAM, permit user-space to write
|
||||
* the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
|
||||
*/
|
||||
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
|
||||
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
|
||||
|
||||
return set_id_reg(vcpu, rd, user_val);
|
||||
}
|
||||
|
||||
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd, u64 user_val)
|
||||
{
|
||||
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
|
||||
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
|
||||
|
||||
/* See set_id_aa64pfr0_el1 for comment about MPAM */
|
||||
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
|
||||
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
|
||||
|
||||
return set_id_reg(vcpu, rd, user_val);
|
||||
}
|
||||
|
||||
static int set_ctr_el0(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd, u64 user_val)
|
||||
{
|
||||
u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
|
||||
|
||||
/*
|
||||
* Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
|
||||
* Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based
|
||||
* on what hardware reports.
|
||||
*
|
||||
* Using a VIPT software model on PIPT will lead to over invalidation,
|
||||
* but still correct. Hence, we can allow downgrading PIPT to VIPT,
|
||||
* but not the other way around. This is handled via arm64_ftr_safe_value()
|
||||
* as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
|
||||
* set as VIPT.
|
||||
*/
|
||||
switch (user_L1Ip) {
|
||||
case CTR_EL0_L1Ip_RESERVED_VPIPT:
|
||||
case CTR_EL0_L1Ip_RESERVED_AIVIVT:
|
||||
return -EINVAL;
|
||||
case CTR_EL0_L1Ip_VIPT:
|
||||
case CTR_EL0_L1Ip_PIPT:
|
||||
return set_id_reg(vcpu, rd, user_val);
|
||||
default:
|
||||
return -ENOENT;
|
||||
}
|
||||
}
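For context on the L1Ip policy check above, a minimal stand-alone sketch of the same validation follows; the macro names are illustrative rather than the kernel's own, the field position (CTR_EL0 bits [15:14]) follows the Arm ARM, and the sketch collapses the reserved (-EINVAL) and unknown (-ENOENT) cases into one error.

#include <stdint.h>
#include <errno.h>

#define CTR_L1IP_SHIFT	14		/* CTR_EL0.L1Ip lives in bits [15:14] */
#define CTR_L1IP_MASK	0x3u
#define L1IP_VIPT	0x2u		/* 0b10 */
#define L1IP_PIPT	0x3u		/* 0b11 */

/* Accept only the two architecturally valid instruction-cache policies. */
static int check_user_l1ip(uint64_t ctr_el0)
{
	unsigned int l1ip = (ctr_el0 >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;

	return (l1ip == L1IP_VIPT || l1ip == L1IP_PIPT) ? 0 : -EINVAL;
}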
|
||||
|
||||
/*
|
||||
* cpufeature ID register user accessors
|
||||
*
|
||||
@ -2104,6 +2219,15 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
||||
.val = v, \
|
||||
}
|
||||
|
||||
#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \
|
||||
SYS_DESC(SYS_##name), \
|
||||
.access = acc, \
|
||||
.reset = rst, \
|
||||
.reg = name, \
|
||||
.visibility = filter, \
|
||||
.val = v, \
|
||||
}
|
||||
|
||||
#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
|
||||
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
|
||||
|
||||
@ -2150,6 +2274,15 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
|
||||
.val = mask, \
|
||||
}
|
||||
|
||||
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
|
||||
#define ID_FILTERED(sysreg, name, mask) { \
|
||||
ID_DESC(sysreg), \
|
||||
.set_user = set_##name, \
|
||||
.visibility = id_visibility, \
|
||||
.reset = kvm_read_sanitised_id_reg, \
|
||||
.val = (mask), \
|
||||
}
|
||||
|
||||
/*
|
||||
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
|
||||
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
|
||||
@ -2236,16 +2369,18 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
||||
return __vcpu_sys_reg(vcpu, r->reg) = val;
|
||||
}
|
||||
|
||||
static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd,
|
||||
unsigned int (*fn)(const struct kvm_vcpu *,
|
||||
const struct sys_reg_desc *))
|
||||
{
|
||||
return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
|
||||
}
|
||||
|
||||
static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
unsigned int r;
|
||||
|
||||
r = el2_visibility(vcpu, rd);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return sve_visibility(vcpu, rd);
|
||||
return __el2_visibility(vcpu, rd, sve_visibility);
|
||||
}
|
||||
|
||||
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
|
||||
@ -2273,12 +2408,48 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
|
||||
static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
if (kvm_has_s1poe(vcpu->kvm))
|
||||
return 0;
|
||||
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
return __el2_visibility(vcpu, rd, s1poe_visibility);
|
||||
}
|
||||
|
||||
static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
if (kvm_has_tcr2(vcpu->kvm))
|
||||
return 0;
|
||||
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
return __el2_visibility(vcpu, rd, tcr2_visibility);
|
||||
}
|
||||
|
||||
static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
if (kvm_has_s1pie(vcpu->kvm))
|
||||
return 0;
|
||||
|
||||
return REG_HIDDEN;
|
||||
}
|
||||
|
||||
static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
return __el2_visibility(vcpu, rd, s1pie_visibility);
|
||||
}
|
||||
|
||||
/*
|
||||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
@ -2374,18 +2545,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
|
||||
/* AArch64 ID registers */
|
||||
/* CRm=4 */
|
||||
{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
|
||||
.access = access_id_reg,
|
||||
.get_user = get_id_reg,
|
||||
.set_user = set_id_reg,
|
||||
.reset = read_sanitised_id_aa64pfr0_el1,
|
||||
.val = ~(ID_AA64PFR0_EL1_AMU |
|
||||
ID_AA64PFR0_EL1_MPAM |
|
||||
ID_AA64PFR0_EL1_SVE |
|
||||
ID_AA64PFR0_EL1_RAS |
|
||||
ID_AA64PFR0_EL1_AdvSIMD |
|
||||
ID_AA64PFR0_EL1_FP), },
|
||||
ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
|
||||
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
|
||||
~(ID_AA64PFR0_EL1_AMU |
|
||||
ID_AA64PFR0_EL1_MPAM |
|
||||
ID_AA64PFR0_EL1_SVE |
|
||||
ID_AA64PFR0_EL1_RAS |
|
||||
ID_AA64PFR0_EL1_AdvSIMD |
|
||||
ID_AA64PFR0_EL1_FP)),
|
||||
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
|
||||
~(ID_AA64PFR1_EL1_PFAR |
|
||||
ID_AA64PFR1_EL1_DF2 |
|
||||
ID_AA64PFR1_EL1_MTEX |
|
||||
ID_AA64PFR1_EL1_THE |
|
||||
@ -2406,11 +2574,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
|
||||
|
||||
/* CRm=5 */
|
||||
{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
|
||||
.access = access_id_reg,
|
||||
.get_user = get_id_reg,
|
||||
.set_user = set_id_aa64dfr0_el1,
|
||||
.reset = read_sanitised_id_aa64dfr0_el1,
|
||||
/*
|
||||
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
|
||||
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
|
||||
@ -2423,10 +2586,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
|
||||
* of breakpoints for more details.
|
||||
*/
|
||||
.val = ID_AA64DFR0_EL1_DoubleLock_MASK |
|
||||
ID_AA64DFR0_EL1_WRPs_MASK |
|
||||
ID_AA64DFR0_EL1_PMUVer_MASK |
|
||||
ID_AA64DFR0_EL1_DebugVer_MASK, },
|
||||
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
|
||||
ID_AA64DFR0_EL1_DoubleLock_MASK |
|
||||
ID_AA64DFR0_EL1_WRPs_MASK |
|
||||
ID_AA64DFR0_EL1_PMUVer_MASK |
|
||||
ID_AA64DFR0_EL1_DebugVer_MASK),
|
||||
ID_SANITISED(ID_AA64DFR1_EL1),
|
||||
ID_UNALLOCATED(5,2),
|
||||
ID_UNALLOCATED(5,3),
|
||||
@ -2489,7 +2653,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
|
||||
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
|
||||
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
|
||||
{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
|
||||
{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
|
||||
.visibility = tcr2_visibility },
|
||||
|
||||
PTRAUTH_KEY(APIA),
|
||||
PTRAUTH_KEY(APIB),
|
||||
@ -2543,8 +2708,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
|
||||
|
||||
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
|
||||
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
|
||||
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
|
||||
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
|
||||
.visibility = s1pie_visibility },
|
||||
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
|
||||
.visibility = s1pie_visibility },
|
||||
{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
|
||||
.visibility = s1poe_visibility },
|
||||
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
|
||||
@ -2553,8 +2720,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
|
||||
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
|
||||
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
|
||||
{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
|
||||
|
||||
{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
|
||||
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
|
||||
|
||||
@ -2599,10 +2769,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
|
||||
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
|
||||
ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
|
||||
CTR_EL0_IDC_MASK |
|
||||
CTR_EL0_DminLine_MASK |
|
||||
CTR_EL0_IminLine_MASK),
|
||||
ID_FILTERED(CTR_EL0, ctr_el0,
|
||||
CTR_EL0_DIC_MASK |
|
||||
CTR_EL0_IDC_MASK |
|
||||
CTR_EL0_DminLine_MASK |
|
||||
CTR_EL0_L1Ip_MASK |
|
||||
CTR_EL0_IminLine_MASK),
|
||||
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
|
||||
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
|
||||
|
||||
@ -2818,14 +2990,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
|
||||
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
|
||||
|
||||
{ SYS_DESC(SYS_ZCR_EL2), .access = access_zcr_el2, .reset = reset_val,
|
||||
.visibility = sve_el2_visibility, .reg = ZCR_EL2 },
|
||||
EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
|
||||
sve_el2_visibility),
|
||||
|
||||
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
|
||||
|
||||
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
|
||||
EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
|
||||
tcr2_el2_visibility),
|
||||
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
|
||||
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
|
||||
|
||||
@ -2853,7 +3027,24 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
|
||||
|
||||
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
|
||||
s1pie_el2_visibility),
|
||||
EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
|
||||
s1pie_el2_visibility),
|
||||
EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
|
||||
s1poe_el2_visibility),
|
||||
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
|
||||
{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
|
||||
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
|
||||
|
||||
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
|
||||
@ -4719,7 +4910,7 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
|
||||
if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
|
||||
vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
|
||||
|
||||
if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
|
||||
if (kvm_has_tcr2(kvm))
|
||||
vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
|
||||
|
||||
if (kvm_has_fpmr(kvm))
|
||||
@ -4769,11 +4960,11 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
|
||||
kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP |
|
||||
HFGITR_EL2_ATS1E1WP);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
if (!kvm_has_s1pie(kvm))
|
||||
kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
|
||||
HFGxTR_EL2_nPIR_EL1);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
if (!kvm_has_s1poe(kvm))
|
||||
kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
|
||||
HFGxTR_EL2_nPOR_EL0);
|
||||
|
||||
|
@ -782,6 +782,9 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
|
||||
|
||||
ite = find_ite(its, device_id, event_id);
|
||||
if (ite && its_is_collection_mapped(ite->collection)) {
|
||||
struct its_device *device = find_its_device(its, device_id);
|
||||
int ite_esz = vgic_its_get_abi(its)->ite_esz;
|
||||
gpa_t gpa = device->itt_addr + ite->event_id * ite_esz;
|
||||
/*
|
||||
* Though the spec talks about removing the pending state, we
|
||||
* don't bother here since we clear the ITTE anyway and the
|
||||
@ -790,7 +793,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
|
||||
vgic_its_invalidate_cache(its);
|
||||
|
||||
its_free_ite(kvm, ite);
|
||||
return 0;
|
||||
|
||||
return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
|
||||
}
|
||||
|
||||
return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
|
||||
@ -1139,9 +1143,11 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
|
||||
bool valid = its_cmd_get_validbit(its_cmd);
|
||||
u8 num_eventid_bits = its_cmd_get_size(its_cmd);
|
||||
gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
|
||||
int dte_esz = vgic_its_get_abi(its)->dte_esz;
|
||||
struct its_device *device;
|
||||
gpa_t gpa;
|
||||
|
||||
if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
|
||||
if (!vgic_its_check_id(its, its->baser_device_table, device_id, &gpa))
|
||||
return E_ITS_MAPD_DEVICE_OOR;
|
||||
|
||||
if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
|
||||
@ -1162,7 +1168,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
|
||||
* is an error, so we are done in any case.
|
||||
*/
|
||||
if (!valid)
|
||||
return 0;
|
||||
return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
|
||||
|
||||
device = vgic_its_alloc_device(its, device_id, itt_addr,
|
||||
num_eventid_bits);
|
||||
@ -2086,7 +2092,6 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
|
||||
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
|
||||
struct its_ite *ite, gpa_t gpa, int ite_esz)
|
||||
{
|
||||
struct kvm *kvm = its->dev->kvm;
|
||||
u32 next_offset;
|
||||
u64 val;
|
||||
|
||||
@ -2095,7 +2100,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
|
||||
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
|
||||
ite->collection->collection_id;
|
||||
val = cpu_to_le64(val);
|
||||
return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
|
||||
|
||||
return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2239,7 +2245,6 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
|
||||
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
|
||||
gpa_t ptr, int dte_esz)
|
||||
{
|
||||
struct kvm *kvm = its->dev->kvm;
|
||||
u64 val, itt_addr_field;
|
||||
u32 next_offset;
|
||||
|
||||
@ -2250,7 +2255,8 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
|
||||
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
|
||||
(dev->num_eventid_bits - 1));
|
||||
val = cpu_to_le64(val);
|
||||
return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
|
||||
|
||||
return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2437,7 +2443,8 @@ static int vgic_its_save_cte(struct vgic_its *its,
|
||||
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
|
||||
collection->collection_id);
|
||||
val = cpu_to_le64(val);
|
||||
return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
|
||||
|
||||
return vgic_its_write_entry_lock(its, gpa, val, esz);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2453,8 +2460,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
BUG_ON(esz > sizeof(val));
|
||||
ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
|
||||
ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
|
||||
if (ret)
|
||||
return ret;
|
||||
val = le64_to_cpu(val);
|
||||
@ -2492,7 +2498,6 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
|
||||
u64 baser = its->baser_coll_table;
|
||||
gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
|
||||
struct its_collection *collection;
|
||||
u64 val;
|
||||
size_t max_size, filled = 0;
|
||||
int ret, cte_esz = abi->cte_esz;
|
||||
|
||||
@ -2516,10 +2521,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
|
||||
* table is not fully filled, add a last dummy element
|
||||
* with valid bit unset
|
||||
*/
|
||||
val = 0;
|
||||
BUG_ON(cte_esz > sizeof(val));
|
||||
ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
|
||||
return ret;
|
||||
return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -146,6 +146,29 @@ static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int vgic_its_read_entry_lock(struct vgic_its *its, gpa_t eaddr,
|
||||
u64 *eval, unsigned long esize)
|
||||
{
|
||||
struct kvm *kvm = its->dev->kvm;
|
||||
|
||||
if (KVM_BUG_ON(esize != sizeof(*eval), kvm))
|
||||
return -EINVAL;
|
||||
|
||||
return kvm_read_guest_lock(kvm, eaddr, eval, esize);
|
||||
|
||||
}
|
||||
|
||||
static inline int vgic_its_write_entry_lock(struct vgic_its *its, gpa_t eaddr,
|
||||
u64 eval, unsigned long esize)
|
||||
{
|
||||
struct kvm *kvm = its->dev->kvm;
|
||||
|
||||
if (KVM_BUG_ON(esize != sizeof(eval), kvm))
|
||||
return -EINVAL;
|
||||
|
||||
return vgic_write_guest_lock(kvm, eaddr, &eval, esize);
|
||||
}
|
||||
|
||||
/*
|
||||
* This struct provides an intermediate representation of the fields contained
|
||||
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
|
||||
|
@ -60,6 +60,8 @@ HW_DBM
|
||||
KVM_HVHE
|
||||
KVM_PROTECTED_MODE
|
||||
MISMATCHED_CACHE_TYPE
|
||||
MPAM
|
||||
MPAM_HCR
|
||||
MTE
|
||||
MTE_ASYMM
|
||||
SME
|
||||
|
@ -1200,7 +1200,7 @@ UnsignedEnum 55:52 BRBE
|
||||
0b0001 IMP
|
||||
0b0010 BRBE_V1P1
|
||||
EndEnum
|
||||
Enum 51:48 MTPMU
|
||||
SignedEnum 51:48 MTPMU
|
||||
0b0000 NI_IMPDEF
|
||||
0b0001 IMP
|
||||
0b1111 NI
|
||||
@ -1208,6 +1208,7 @@ EndEnum
|
||||
UnsignedEnum 47:44 TraceBuffer
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
0b0010 TRBE_V1P1
|
||||
EndEnum
|
||||
UnsignedEnum 43:40 TraceFilt
|
||||
0b0000 NI
|
||||
@ -1224,11 +1225,18 @@ UnsignedEnum 35:32 PMSVer
|
||||
0b0011 V1P2
|
||||
0b0100 V1P3
|
||||
0b0101 V1P4
|
||||
0b0110 V1P5
|
||||
EndEnum
|
||||
Field 31:28 CTX_CMPs
|
||||
Res0 27:24
|
||||
UnsignedEnum 27:24 SEBEP
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
Field 23:20 WRPs
|
||||
Res0 19:16
|
||||
UnsignedEnum 19:16 PMSS
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
Field 15:12 BRPs
|
||||
UnsignedEnum 11:8 PMUVer
|
||||
0b0000 NI
|
||||
@ -1238,6 +1246,7 @@ UnsignedEnum 11:8 PMUVer
|
||||
0b0110 V3P5
|
||||
0b0111 V3P7
|
||||
0b1000 V3P8
|
||||
0b1001 V3P9
|
||||
0b1111 IMP_DEF
|
||||
EndEnum
|
||||
UnsignedEnum 7:4 TraceVer
|
||||
@ -1287,6 +1296,32 @@ Field 15:8 BRPs
|
||||
Field 7:0 SYSPMUID
|
||||
EndSysreg
|
||||
|
||||
Sysreg ID_AA64DFR2_EL1 3 0 0 5 2
|
||||
Res0 63:28
|
||||
UnsignedEnum 27:24 TRBE_EXC
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
UnsignedEnum 23:20 SPE_nVM
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
UnsignedEnum 19:16 SPE_EXC
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
Res0 15:8
|
||||
UnsignedEnum 7:4 BWE
|
||||
0b0000 NI
|
||||
0b0001 FEAT_BWE
|
||||
0b0010 FEAT_BWE2
|
||||
EndEnum
|
||||
UnsignedEnum 3:0 STEP
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
EndSysreg
|
||||
|
||||
Sysreg ID_AA64AFR0_EL1 3 0 0 5 4
|
||||
Res0 63:32
|
||||
Field 31:28 IMPDEF7
|
||||
@ -1688,6 +1723,7 @@ UnsignedEnum 3:0 HAFDBS
|
||||
0b0000 NI
|
||||
0b0001 AF
|
||||
0b0010 DBM
|
||||
0b0011 HAFT
|
||||
EndEnum
|
||||
EndSysreg
|
||||
|
||||
@ -2388,6 +2424,41 @@ Field 1 AFSR1_EL1
|
||||
Field 0 AFSR0_EL1
|
||||
EndSysregFields
|
||||
|
||||
Sysreg MDCR_EL2 3 4 1 1 1
|
||||
Res0 63:51
|
||||
Field 50 EnSTEPOP
|
||||
Res0 49:44
|
||||
Field 43 EBWE
|
||||
Res0 42
|
||||
Field 41:40 PMEE
|
||||
Res0 39:37
|
||||
Field 36 HPMFZS
|
||||
Res0 35:32
|
||||
Field 31:30 PMSSE
|
||||
Field 29 HPMFZO
|
||||
Field 28 MTPME
|
||||
Field 27 TDCC
|
||||
Field 26 HLP
|
||||
Field 25:24 E2TB
|
||||
Field 23 HCCD
|
||||
Res0 22:20
|
||||
Field 19 TTRF
|
||||
Res0 18
|
||||
Field 17 HPMD
|
||||
Res0 16
|
||||
Field 15 EnSPM
|
||||
Field 14 TPMS
|
||||
Field 13:12 E2PB
|
||||
Field 11 TDRA
|
||||
Field 10 TDOSA
|
||||
Field 9 TDA
|
||||
Field 8 TDE
|
||||
Field 7 HPME
|
||||
Field 6 TPM
|
||||
Field 5 TPMCR
|
||||
Field 4:0 HPMN
|
||||
EndSysreg
|
||||
|
||||
Sysreg HFGRTR_EL2 3 4 1 1 4
|
||||
Fields HFGxTR_EL2
|
||||
EndSysreg
|
||||
@ -2737,6 +2808,126 @@ Field 1 E2SPE
|
||||
Field 0 E0HSPE
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMHCR_EL2 3 4 10 4 0
|
||||
Res0 63:32
|
||||
Field 31 TRAP_MPAMIDR_EL1
|
||||
Res0 30:9
|
||||
Field 8 GSTAPP_PLK
|
||||
Res0 7:2
|
||||
Field 1 EL1_VPMEN
|
||||
Field 0 EL0_VPMEN
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPMV_EL2 3 4 10 4 1
|
||||
Res0 63:32
|
||||
Field 31 VPM_V31
|
||||
Field 30 VPM_V30
|
||||
Field 29 VPM_V29
|
||||
Field 28 VPM_V28
|
||||
Field 27 VPM_V27
|
||||
Field 26 VPM_V26
|
||||
Field 25 VPM_V25
|
||||
Field 24 VPM_V24
|
||||
Field 23 VPM_V23
|
||||
Field 22 VPM_V22
|
||||
Field 21 VPM_V21
|
||||
Field 20 VPM_V20
|
||||
Field 19 VPM_V19
|
||||
Field 18 VPM_V18
|
||||
Field 17 VPM_V17
|
||||
Field 16 VPM_V16
|
||||
Field 15 VPM_V15
|
||||
Field 14 VPM_V14
|
||||
Field 13 VPM_V13
|
||||
Field 12 VPM_V12
|
||||
Field 11 VPM_V11
|
||||
Field 10 VPM_V10
|
||||
Field 9 VPM_V9
|
||||
Field 8 VPM_V8
|
||||
Field 7 VPM_V7
|
||||
Field 6 VPM_V6
|
||||
Field 5 VPM_V5
|
||||
Field 4 VPM_V4
|
||||
Field 3 VPM_V3
|
||||
Field 2 VPM_V2
|
||||
Field 1 VPM_V1
|
||||
Field 0 VPM_V0
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAM2_EL2 3 4 10 5 0
|
||||
Field 63 MPAMEN
|
||||
Res0 62:59
|
||||
Field 58 TIDR
|
||||
Res0 57
|
||||
Field 56 ALTSP_HFC
|
||||
Field 55 ALTSP_EL2
|
||||
Field 54 ALTSP_FRCD
|
||||
Res0 53:51
|
||||
Field 50 EnMPAMSM
|
||||
Field 49 TRAPMPAM0EL1
|
||||
Field 48 TRAPMPAM1EL1
|
||||
Field 47:40 PMG_D
|
||||
Field 39:32 PMG_I
|
||||
Field 31:16 PARTID_D
|
||||
Field 15:0 PARTID_I
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM0_EL2 3 4 10 6 0
|
||||
Field 63:48 PhyPARTID3
|
||||
Field 47:32 PhyPARTID2
|
||||
Field 31:16 PhyPARTID1
|
||||
Field 15:0 PhyPARTID0
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM1_EL2 3 4 10 6 1
|
||||
Field 63:48 PhyPARTID7
|
||||
Field 47:32 PhyPARTID6
|
||||
Field 31:16 PhyPARTID5
|
||||
Field 15:0 PhyPARTID4
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM2_EL2 3 4 10 6 2
|
||||
Field 63:48 PhyPARTID11
|
||||
Field 47:32 PhyPARTID10
|
||||
Field 31:16 PhyPARTID9
|
||||
Field 15:0 PhyPARTID8
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM3_EL2 3 4 10 6 3
|
||||
Field 63:48 PhyPARTID15
|
||||
Field 47:32 PhyPARTID14
|
||||
Field 31:16 PhyPARTID13
|
||||
Field 15:0 PhyPARTID12
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM4_EL2 3 4 10 6 4
|
||||
Field 63:48 PhyPARTID19
|
||||
Field 47:32 PhyPARTID18
|
||||
Field 31:16 PhyPARTID17
|
||||
Field 15:0 PhyPARTID16
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM5_EL2 3 4 10 6 5
|
||||
Field 63:48 PhyPARTID23
|
||||
Field 47:32 PhyPARTID22
|
||||
Field 31:16 PhyPARTID21
|
||||
Field 15:0 PhyPARTID20
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM6_EL2 3 4 10 6 6
|
||||
Field 63:48 PhyPARTID27
|
||||
Field 47:32 PhyPARTID26
|
||||
Field 31:16 PhyPARTID25
|
||||
Field 15:0 PhyPARTID24
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMVPM7_EL2 3 4 10 6 7
|
||||
Field 63:48 PhyPARTID31
|
||||
Field 47:32 PhyPARTID30
|
||||
Field 31:16 PhyPARTID29
|
||||
Field 15:0 PhyPARTID28
|
||||
EndSysreg
|
||||
|
||||
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
|
||||
Fields CONTEXTIDR_ELx
|
||||
EndSysreg
|
||||
@ -2769,6 +2960,10 @@ Sysreg FAR_EL12 3 5 6 0 0
|
||||
Field 63:0 ADDR
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAM1_EL12 3 5 10 5 0
|
||||
Fields MPAM1_ELx
|
||||
EndSysreg
|
||||
|
||||
Sysreg CONTEXTIDR_EL12 3 5 13 0 1
|
||||
Fields CONTEXTIDR_ELx
|
||||
EndSysreg
|
||||
@ -2819,8 +3014,7 @@ Field 13 AMEC1
|
||||
Field 12 AMEC0
|
||||
Field 11 HAFT
|
||||
Field 10 PTTWI
|
||||
Field 9:8 SKL1
|
||||
Field 7:6 SKL0
|
||||
Res0 9:6
|
||||
Field 5 D128
|
||||
Field 4 AIE
|
||||
Field 3 POE
|
||||
@ -2883,6 +3077,10 @@ Sysreg PIRE0_EL12 3 5 10 2 2
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
|
||||
Sysreg PIRE0_EL2 3 4 10 2 2
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
|
||||
Sysreg PIR_EL1 3 0 10 2 3
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
@ -2903,6 +3101,10 @@ Sysreg POR_EL1 3 0 10 2 4
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
|
||||
Sysreg POR_EL2 3 4 10 2 4
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
|
||||
Sysreg POR_EL12 3 5 10 2 4
|
||||
Fields PIRx_ELx
|
||||
EndSysreg
|
||||
@ -2941,6 +3143,22 @@ Res0 1
|
||||
Field 0 EN
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAMIDR_EL1 3 0 10 4 4
|
||||
Res0 63:62
|
||||
Field 61 HAS_SDEFLT
|
||||
Field 60 HAS_FORCE_NS
|
||||
Field 59 SP4
|
||||
Field 58 HAS_TIDR
|
||||
Field 57 HAS_ALTSP
|
||||
Res0 56:40
|
||||
Field 39:32 PMG_MAX
|
||||
Res0 31:21
|
||||
Field 20:18 VPMR_MAX
|
||||
Field 17 HAS_HCR
|
||||
Res0 16
|
||||
Field 15:0 PARTID_MAX
|
||||
EndSysreg
|
||||
|
||||
Sysreg LORID_EL1 3 0 10 4 7
|
||||
Res0 63:24
|
||||
Field 23:16 LD
|
||||
@ -2948,6 +3166,27 @@ Res0 15:8
|
||||
Field 7:0 LR
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAM1_EL1 3 0 10 5 0
|
||||
Field 63 MPAMEN
|
||||
Res0 62:61
|
||||
Field 60 FORCED_NS
|
||||
Res0 59:55
|
||||
Field 54 ALTSP_FRCD
|
||||
Res0 53:48
|
||||
Field 47:40 PMG_D
|
||||
Field 39:32 PMG_I
|
||||
Field 31:16 PARTID_D
|
||||
Field 15:0 PARTID_I
|
||||
EndSysreg
|
||||
|
||||
Sysreg MPAM0_EL1 3 0 10 5 1
|
||||
Res0 63:48
|
||||
Field 47:40 PMG_D
|
||||
Field 39:32 PMG_I
|
||||
Field 31:16 PARTID_D
|
||||
Field 15:0 PARTID_I
|
||||
EndSysreg
|
||||
|
||||
Sysreg ISR_EL1 3 0 12 1 0
|
||||
Res0 63:11
|
||||
Field 10 IS
|
||||
|
@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
|
||||
|
||||
static u32 psci_cpu_suspend_feature;
|
||||
static bool psci_system_reset2_supported;
|
||||
static bool psci_system_off2_hibernate_supported;
|
||||
|
||||
static inline bool psci_has_ext_power_state(void)
|
||||
{
|
||||
@ -333,6 +334,36 @@ static void psci_sys_poweroff(void)
|
||||
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
static int psci_sys_hibernate(struct sys_off_data *data)
|
||||
{
|
||||
/*
|
||||
* If no hibernate type is specified SYSTEM_OFF2 defaults to selecting
|
||||
* HIBERNATE_OFF.
|
||||
*
|
||||
* There are hypervisors in the wild that do not align with the spec and
|
||||
* reject calls that explicitly provide a hibernate type. For
|
||||
* compatibility with these nonstandard implementations, pass 0 as the
|
||||
* type.
|
||||
*/
|
||||
if (system_entering_hibernation())
|
||||
invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static int __init psci_hibernate_init(void)
|
||||
{
|
||||
if (psci_system_off2_hibernate_supported) {
|
||||
/* Higher priority than EFI shutdown, but only for hibernate */
|
||||
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
|
||||
SYS_OFF_PRIO_FIRMWARE + 2,
|
||||
psci_sys_hibernate, NULL);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(psci_hibernate_init);
|
||||
#endif
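As the comment in psci_sys_hibernate() notes, the call above passes 0 as the hibernate type for compatibility with nonstandard implementations. For illustration only, a strictly spec-conforming PSCI v1.3 variant would pass the type explicitly, assuming the type value matches the PSCI_FEATURES bit as the uapi constant defined further down suggests:

	invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2),
		       PSCI_1_3_OFF_TYPE_HIBERNATE_OFF, 0, 0);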
|
||||
|
||||
static int psci_features(u32 psci_func_id)
|
||||
{
|
||||
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
|
||||
@ -364,6 +395,7 @@ static const struct {
|
||||
PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
|
||||
PSCI_ID(1_1, MEM_PROTECT),
|
||||
PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
|
||||
PSCI_ID_NATIVE(1_3, SYSTEM_OFF2),
|
||||
};
|
||||
|
||||
static int psci_debugfs_read(struct seq_file *s, void *data)
|
||||
@ -525,6 +557,18 @@ static void __init psci_init_system_reset2(void)
|
||||
psci_system_reset2_supported = true;
|
||||
}
|
||||
|
||||
static void __init psci_init_system_off2(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2));
|
||||
if (ret < 0)
|
||||
return;
|
||||
|
||||
if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF)
|
||||
psci_system_off2_hibernate_supported = true;
|
||||
}
|
||||
|
||||
static void __init psci_init_system_suspend(void)
|
||||
{
|
||||
int ret;
|
||||
@ -655,6 +699,7 @@ static int __init psci_probe(void)
|
||||
psci_init_cpu_suspend();
|
||||
psci_init_system_suspend();
|
||||
psci_init_system_reset2();
|
||||
psci_init_system_off2();
|
||||
kvm_init_hyp_services();
|
||||
}
|
||||
|
||||
|
@ -147,6 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
|
||||
void kvm_timer_cpu_up(void);
|
||||
void kvm_timer_cpu_down(void);
|
||||
|
||||
/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
|
||||
#define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0))
|
||||
|
||||
static inline bool has_cntpoff(void)
|
||||
{
|
||||
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
|
||||
|
@ -47,7 +47,8 @@ static __always_inline bool kvm_arm_support_pmu_v3(void)
|
||||
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
|
||||
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
|
||||
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
|
||||
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
|
||||
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
|
||||
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
|
||||
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
|
||||
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
|
||||
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
|
||||
@ -96,6 +97,8 @@ int kvm_arm_set_default_pmu(struct kvm *kvm);
|
||||
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
|
||||
|
||||
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
|
||||
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
|
||||
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
|
||||
#else
|
||||
struct kvm_pmu {
|
||||
};
|
||||
@ -113,7 +116,11 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
|
||||
u64 select_idx, u64 val) {}
|
||||
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
|
||||
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@ -187,6 +194,13 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -14,8 +14,10 @@
|
||||
#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
|
||||
#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
|
||||
#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1)
|
||||
#define KVM_ARM_PSCI_1_2 PSCI_VERSION(1, 2)
|
||||
#define KVM_ARM_PSCI_1_3 PSCI_VERSION(1, 3)
|
||||
|
||||
#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1
|
||||
#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_3
|
||||
|
||||
static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -59,6 +59,7 @@
|
||||
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
|
||||
#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19)
|
||||
#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20)
|
||||
#define PSCI_1_3_FN_SYSTEM_OFF2 PSCI_0_2_FN(21)
|
||||
|
||||
#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
|
||||
#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
|
||||
@ -68,6 +69,7 @@
|
||||
|
||||
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
|
||||
#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20)
|
||||
#define PSCI_1_3_FN64_SYSTEM_OFF2 PSCI_0_2_FN64(21)
|
||||
|
||||
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
|
||||
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
|
||||
@ -100,6 +102,9 @@
|
||||
#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0
|
||||
#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U
|
||||
|
||||
/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */
|
||||
#define PSCI_1_3_OFF_TYPE_HIBERNATE_OFF BIT(0)
|
||||
|
||||
/* PSCI version decoding (independent of PSCI version) */
|
||||
#define PSCI_VERSION_MAJOR_SHIFT 16
|
||||
#define PSCI_VERSION_MINOR_MASK \
|
||||
|
@ -685,8 +685,11 @@ static void power_down(void)
|
||||
}
|
||||
fallthrough;
|
||||
case HIBERNATION_SHUTDOWN:
|
||||
if (kernel_can_power_off())
|
||||
if (kernel_can_power_off()) {
|
||||
entering_platform_hibernation = true;
|
||||
kernel_power_off();
|
||||
entering_platform_hibernation = false;
|
||||
}
|
||||
break;
|
||||
}
|
||||
kernel_halt();
|
||||
|
42
tools/arch/arm64/include/asm/brk-imm.h
Normal file
@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Ltd.
*/

#ifndef __ASM_BRK_IMM_H
#define __ASM_BRK_IMM_H

/*
* #imm16 values used for BRK instruction generation
* 0x004: for installing kprobes
* 0x005: for installing uprobes
* 0x006: for kprobe software single-step
* 0x007: for kretprobe return
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
* 0x800: kernel-mode BUG() and WARN() traps
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
* 0x55xx: Undefined Behavior Sanitizer traps ('U' << 8)
* 0x8xxx: Control-Flow Integrity traps
*/
#define KPROBES_BRK_IMM 0x004
#define UPROBES_BRK_IMM 0x005
#define KPROBES_BRK_SS_IMM 0x006
#define KRETPROBES_BRK_IMM 0x007
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
#define KASAN_BRK_IMM 0x900
#define KASAN_BRK_MASK 0x0ff
#define UBSAN_BRK_IMM 0x5500
#define UBSAN_BRK_MASK 0x00ff

#define CFI_BRK_IMM_TARGET GENMASK(4, 0)
#define CFI_BRK_IMM_TYPE GENMASK(9, 5)
#define CFI_BRK_IMM_BASE 0x8000
#define CFI_BRK_IMM_MASK (CFI_BRK_IMM_TARGET | CFI_BRK_IMM_TYPE)

#endif
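A minimal sketch of picking apart a CFI trap immediate with the CFI_BRK_IMM_* masks above; FIELD_GET() comes from <linux/bitfield.h>, and the interpretation of the two fields as register indices (branch target and expected type) is an assumption based on the field names.

#include <linux/bitfield.h>
#include <linux/printk.h>

/* brk_imm is the 16-bit BRK comment field taken from a CFI trap site. */
static void report_cfi_brk(unsigned long brk_imm)
{
	unsigned int target_reg = FIELD_GET(CFI_BRK_IMM_TARGET, brk_imm);
	unsigned int type_reg = FIELD_GET(CFI_BRK_IMM_TYPE, brk_imm);

	pr_info("CFI trap: target address in x%u, expected type in x%u\n",
		target_reg, type_reg);
}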
|
455
tools/arch/arm64/include/asm/esr.h
Normal file
@ -0,0 +1,455 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2013 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
|
||||
#ifndef __ASM_ESR_H
|
||||
#define __ASM_ESR_H
|
||||
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#define ESR_ELx_EC_UNKNOWN UL(0x00)
|
||||
#define ESR_ELx_EC_WFx UL(0x01)
|
||||
/* Unallocated EC: 0x02 */
|
||||
#define ESR_ELx_EC_CP15_32 UL(0x03)
|
||||
#define ESR_ELx_EC_CP15_64 UL(0x04)
|
||||
#define ESR_ELx_EC_CP14_MR UL(0x05)
|
||||
#define ESR_ELx_EC_CP14_LS UL(0x06)
|
||||
#define ESR_ELx_EC_FP_ASIMD UL(0x07)
|
||||
#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
|
||||
#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
|
||||
/* Unallocated EC: 0x0A - 0x0B */
|
||||
#define ESR_ELx_EC_CP14_64 UL(0x0C)
|
||||
#define ESR_ELx_EC_BTI UL(0x0D)
|
||||
#define ESR_ELx_EC_ILL UL(0x0E)
|
||||
/* Unallocated EC: 0x0F - 0x10 */
|
||||
#define ESR_ELx_EC_SVC32 UL(0x11)
|
||||
#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
|
||||
#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
|
||||
/* Unallocated EC: 0x14 */
|
||||
#define ESR_ELx_EC_SVC64 UL(0x15)
|
||||
#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
|
||||
#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
|
||||
#define ESR_ELx_EC_SYS64 UL(0x18)
|
||||
#define ESR_ELx_EC_SVE UL(0x19)
|
||||
#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
|
||||
/* Unallocated EC: 0x1B */
|
||||
#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
|
||||
#define ESR_ELx_EC_SME UL(0x1D)
|
||||
/* Unallocated EC: 0x1E */
|
||||
#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
|
||||
#define ESR_ELx_EC_IABT_LOW UL(0x20)
|
||||
#define ESR_ELx_EC_IABT_CUR UL(0x21)
|
||||
#define ESR_ELx_EC_PC_ALIGN UL(0x22)
|
||||
/* Unallocated EC: 0x23 */
|
||||
#define ESR_ELx_EC_DABT_LOW UL(0x24)
|
||||
#define ESR_ELx_EC_DABT_CUR UL(0x25)
|
||||
#define ESR_ELx_EC_SP_ALIGN UL(0x26)
|
||||
#define ESR_ELx_EC_MOPS UL(0x27)
|
||||
#define ESR_ELx_EC_FP_EXC32 UL(0x28)
|
||||
/* Unallocated EC: 0x29 - 0x2B */
|
||||
#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
|
||||
/* Unallocated EC: 0x2D - 0x2E */
|
||||
#define ESR_ELx_EC_SERROR UL(0x2F)
|
||||
#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
|
||||
#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
|
||||
#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
|
||||
#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
|
||||
#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
|
||||
#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
|
||||
/* Unallocated EC: 0x36 - 0x37 */
|
||||
#define ESR_ELx_EC_BKPT32 UL(0x38)
|
||||
/* Unallocated EC: 0x39 */
|
||||
#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
|
||||
/* Unallocated EC: 0x3B */
|
||||
#define ESR_ELx_EC_BRK64 UL(0x3C)
|
||||
/* Unallocated EC: 0x3D - 0x3F */
|
||||
#define ESR_ELx_EC_MAX UL(0x3F)
|
||||
|
||||
#define ESR_ELx_EC_SHIFT (26)
|
||||
#define ESR_ELx_EC_WIDTH (6)
|
||||
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
|
||||
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
|
||||
|
||||
#define ESR_ELx_IL_SHIFT (25)
|
||||
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
|
||||
#define ESR_ELx_ISS_MASK (GENMASK(24, 0))
|
||||
#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK)
|
||||
#define ESR_ELx_ISS2_SHIFT (32)
|
||||
#define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32))
|
||||
#define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT)
|
||||
|
||||
/* ISS field definitions shared by different classes */
|
||||
#define ESR_ELx_WNR_SHIFT (6)
|
||||
#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
|
||||
|
||||
/* Asynchronous Error Type */
|
||||
#define ESR_ELx_IDS_SHIFT (24)
|
||||
#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
|
||||
#define ESR_ELx_AET_SHIFT (10)
|
||||
#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
|
||||
|
||||
#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
|
||||
#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
|
||||
#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
|
||||
#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
|
||||
#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
|
||||
|
||||
/* Shared ISS field definitions for Data/Instruction aborts */
|
||||
#define ESR_ELx_SET_SHIFT (11)
|
||||
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
|
||||
#define ESR_ELx_FnV_SHIFT (10)
|
||||
#define ESR_ELx_FnV (UL(1) << ESR_ELx_FnV_SHIFT)
|
||||
#define ESR_ELx_EA_SHIFT (9)
|
||||
#define ESR_ELx_EA (UL(1) << ESR_ELx_EA_SHIFT)
|
||||
#define ESR_ELx_S1PTW_SHIFT (7)
|
||||
#define ESR_ELx_S1PTW (UL(1) << ESR_ELx_S1PTW_SHIFT)
|
||||
|
||||
/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
|
||||
#define ESR_ELx_FSC (0x3F)
|
||||
#define ESR_ELx_FSC_TYPE (0x3C)
|
||||
#define ESR_ELx_FSC_LEVEL (0x03)
|
||||
#define ESR_ELx_FSC_EXTABT (0x10)
|
||||
#define ESR_ELx_FSC_MTE (0x11)
|
||||
#define ESR_ELx_FSC_SERROR (0x11)
|
||||
#define ESR_ELx_FSC_ACCESS (0x08)
|
||||
#define ESR_ELx_FSC_FAULT (0x04)
|
||||
#define ESR_ELx_FSC_PERM (0x0C)
|
||||
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
|
||||
#define ESR_ELx_FSC_SECC (0x18)
|
||||
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
|
||||
|
||||
/* Status codes for individual page table levels */
|
||||
#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
|
||||
#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + (n))
|
||||
|
||||
#define ESR_ELx_FSC_FAULT_nL (0x2C)
|
||||
#define ESR_ELx_FSC_FAULT_L(n) (((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \
|
||||
ESR_ELx_FSC_FAULT) + (n))
|
||||
|
||||
/* ISS field definitions for Data Aborts */
|
||||
#define ESR_ELx_ISV_SHIFT (24)
|
||||
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
|
||||
#define ESR_ELx_SAS_SHIFT (22)
|
||||
#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
|
||||
#define ESR_ELx_SSE_SHIFT (21)
|
||||
#define ESR_ELx_SSE (UL(1) << ESR_ELx_SSE_SHIFT)
|
||||
#define ESR_ELx_SRT_SHIFT (16)
|
||||
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
|
||||
#define ESR_ELx_SF_SHIFT (15)
|
||||
#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
|
||||
#define ESR_ELx_AR_SHIFT (14)
|
||||
#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
|
||||
#define ESR_ELx_CM_SHIFT (8)
|
||||
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
|
||||
|
||||
/* ISS2 field definitions for Data Aborts */
|
||||
#define ESR_ELx_TnD_SHIFT (10)
|
||||
#define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT)
|
||||
#define ESR_ELx_TagAccess_SHIFT (9)
|
||||
#define ESR_ELx_TagAccess (UL(1) << ESR_ELx_TagAccess_SHIFT)
|
||||
#define ESR_ELx_GCS_SHIFT (8)
|
||||
#define ESR_ELx_GCS (UL(1) << ESR_ELx_GCS_SHIFT)
|
||||
#define ESR_ELx_Overlay_SHIFT (6)
|
||||
#define ESR_ELx_Overlay (UL(1) << ESR_ELx_Overlay_SHIFT)
|
||||
#define ESR_ELx_DirtyBit_SHIFT (5)
|
||||
#define ESR_ELx_DirtyBit (UL(1) << ESR_ELx_DirtyBit_SHIFT)
|
||||
#define ESR_ELx_Xs_SHIFT (0)
|
||||
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
|
||||
|
||||
/* ISS field definitions for exceptions taken in to Hyp */
|
||||
#define ESR_ELx_FSC_ADDRSZ (0x00)
|
||||
#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
|
||||
#define ESR_ELx_CV (UL(1) << 24)
|
||||
#define ESR_ELx_COND_SHIFT (20)
|
||||
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
|
||||
#define ESR_ELx_WFx_ISS_RN (UL(0x1F) << 5)
|
||||
#define ESR_ELx_WFx_ISS_RV (UL(1) << 2)
|
||||
#define ESR_ELx_WFx_ISS_TI (UL(3) << 0)
|
||||
#define ESR_ELx_WFx_ISS_WFxT (UL(2) << 0)
|
||||
#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
|
||||
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
|
||||
#define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1)
|
||||
|
||||
#define DISR_EL1_IDS (UL(1) << 24)
|
||||
/*
|
||||
* DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
|
||||
* different things in the future...
|
||||
*/
|
||||
#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
|
||||
|
||||
/* ESR value templates for specific events */
|
||||
#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | \
|
||||
(ESR_ELx_WFx_ISS_TI & ~ESR_ELx_WFx_ISS_WFxT))
|
||||
#define ESR_ELx_WFx_WFI_VAL ((ESR_ELx_EC_WFx << ESR_ELx_EC_SHIFT) | \
|
||||
ESR_ELx_WFx_ISS_WFI)
|
||||
|
||||
/* BRK instruction trap from AArch64 state */
|
||||
#define ESR_ELx_BRK64_ISS_COMMENT_MASK 0xffff
|
||||
|
||||
/* ISS field definitions for System instruction traps */
|
||||
#define ESR_ELx_SYS64_ISS_RES0_SHIFT 22
|
||||
#define ESR_ELx_SYS64_ISS_RES0_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_DIR_MASK 0x1
|
||||
#define ESR_ELx_SYS64_ISS_DIR_READ 0x1
|
||||
#define ESR_ELx_SYS64_ISS_DIR_WRITE 0x0
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_RT_SHIFT 5
|
||||
#define ESR_ELx_SYS64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_CRM_SHIFT 1
|
||||
#define ESR_ELx_SYS64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_CRN_SHIFT 10
|
||||
#define ESR_ELx_SYS64_ISS_CRN_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP1_SHIFT 14
|
||||
#define ESR_ELx_SYS64_ISS_OP1_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP2_SHIFT 17
|
||||
#define ESR_ELx_SYS64_ISS_OP2_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP0_SHIFT 20
|
||||
#define ESR_ELx_SYS64_ISS_OP0_MASK (UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP1_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP2_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRN_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRM_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
|
||||
(((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
|
||||
((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
|
||||
((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
|
||||
((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
|
||||
((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \
|
||||
ESR_ELx_SYS64_ISS_DIR_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_RT(esr) \
|
||||
(((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
|
||||
/*
|
||||
* User space cache operations have the following sysreg encoding
|
||||
* in System instructions.
|
||||
* op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 13, 14 }, WRITE (L=0)
|
||||
*/
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVADP 13
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
|
||||
#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU 5
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP1_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP2_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRN_MASK | \
|
||||
ESR_ELx_SYS64_ISS_DIR_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
|
||||
(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_WRITE)
|
||||
/*
|
||||
* User space MRS operations which are supported for emulation
|
||||
* have the following sysreg encoding in System instructions.
|
||||
* op0 = 3, op1= 0, crn = 0, {crm = 0, 4-7}, READ (L = 1)
|
||||
*/
|
||||
#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP1_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRN_MASK | \
|
||||
ESR_ELx_SYS64_ISS_DIR_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL \
|
||||
(ESR_ELx_SYS64_ISS_SYS_VAL(3, 0, 0, 0, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CTR ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CNTVCTSS (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 6, 14, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#define esr_sys64_to_sysreg(e) \
|
||||
sys_reg((((e) & ESR_ELx_SYS64_ISS_OP0_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_OP0_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_OP1_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_CRN_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_CRM_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_OP2_SHIFT))
|
||||
|
||||
#define esr_cp15_to_sysreg(e) \
|
||||
sys_reg(3, \
|
||||
(((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_OP1_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_CRN_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_CRM_SHIFT), \
|
||||
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
|
||||
ESR_ELx_SYS64_ISS_OP2_SHIFT))
|
||||
|
||||
/* ISS field definitions for ERET/ERETAA/ERETAB trapping */
|
||||
#define ESR_ELx_ERET_ISS_ERET 0x2
|
||||
#define ESR_ELx_ERET_ISS_ERETA 0x1
|
||||
|
||||
/*
|
||||
* ISS field definitions for floating-point exception traps
|
||||
* (FP_EXC_32/FP_EXC_64).
|
||||
*
|
||||
* (The FPEXC_* constants are used instead for common bits.)
|
||||
*/
|
||||
|
||||
#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
|
||||
|
||||
/*
|
||||
* ISS field definitions for CP15 accesses
|
||||
*/
|
||||
#define ESR_ELx_CP15_32_ISS_DIR_MASK 0x1
|
||||
#define ESR_ELx_CP15_32_ISS_DIR_READ 0x1
|
||||
#define ESR_ELx_CP15_32_ISS_DIR_WRITE 0x0
|
||||
|
||||
#define ESR_ELx_CP15_32_ISS_RT_SHIFT 5
#define ESR_ELx_CP15_32_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_32_ISS_RT_SHIFT)
#define ESR_ELx_CP15_32_ISS_CRM_SHIFT 1
#define ESR_ELx_CP15_32_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRM_SHIFT)
#define ESR_ELx_CP15_32_ISS_CRN_SHIFT 10
#define ESR_ELx_CP15_32_ISS_CRN_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRN_SHIFT)
#define ESR_ELx_CP15_32_ISS_OP1_SHIFT 14
#define ESR_ELx_CP15_32_ISS_OP1_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP1_SHIFT)
#define ESR_ELx_CP15_32_ISS_OP2_SHIFT 17
#define ESR_ELx_CP15_32_ISS_OP2_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP2_SHIFT)

#define ESR_ELx_CP15_32_ISS_SYS_MASK (ESR_ELx_CP15_32_ISS_OP1_MASK | \
ESR_ELx_CP15_32_ISS_OP2_MASK | \
ESR_ELx_CP15_32_ISS_CRN_MASK | \
ESR_ELx_CP15_32_ISS_CRM_MASK | \
ESR_ELx_CP15_32_ISS_DIR_MASK)
#define ESR_ELx_CP15_32_ISS_SYS_VAL(op1, op2, crn, crm) \
(((op1) << ESR_ELx_CP15_32_ISS_OP1_SHIFT) | \
((op2) << ESR_ELx_CP15_32_ISS_OP2_SHIFT) | \
((crn) << ESR_ELx_CP15_32_ISS_CRN_SHIFT) | \
((crm) << ESR_ELx_CP15_32_ISS_CRM_SHIFT))

#define ESR_ELx_CP15_64_ISS_DIR_MASK 0x1
#define ESR_ELx_CP15_64_ISS_DIR_READ 0x1
#define ESR_ELx_CP15_64_ISS_DIR_WRITE 0x0

#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)

#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)

#define ESR_ELx_CP15_64_ISS_OP1_SHIFT 16
#define ESR_ELx_CP15_64_ISS_OP1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)

#define ESR_ELx_CP15_64_ISS_SYS_VAL(op1, crm) \
(((op1) << ESR_ELx_CP15_64_ISS_OP1_SHIFT) | \
((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))

#define ESR_ELx_CP15_64_ISS_SYS_MASK (ESR_ELx_CP15_64_ISS_OP1_MASK | \
ESR_ELx_CP15_64_ISS_CRM_MASK | \
ESR_ELx_CP15_64_ISS_DIR_MASK)

#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
ESR_ELx_CP15_64_ISS_DIR_READ)

#define ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS (ESR_ELx_CP15_64_ISS_SYS_VAL(9, 14) | \
ESR_ELx_CP15_64_ISS_DIR_READ)

#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
ESR_ELx_CP15_32_ISS_DIR_READ)

/*
* ISS values for SME traps
*/

#define ESR_ELx_SME_ISS_SME_DISABLED 0
#define ESR_ELx_SME_ISS_ILL 1
#define ESR_ELx_SME_ISS_SM_DISABLED 2
#define ESR_ELx_SME_ISS_ZA_DISABLED 3
#define ESR_ELx_SME_ISS_ZT_DISABLED 4

/* ISS field definitions for MOPS exceptions */
#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
#define ESR_ELx_MOPS_ISS_FROM_EPILOGUE (UL(1) << 18)
#define ESR_ELx_MOPS_ISS_WRONG_OPTION (UL(1) << 17)
#define ESR_ELx_MOPS_ISS_OPTION_A (UL(1) << 16)
#define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10)
#define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5)
#define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)

#ifndef __ASSEMBLY__
#include <asm/types.h>

static inline unsigned long esr_brk_comment(unsigned long esr)
{
return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
}

static inline bool esr_is_data_abort(unsigned long esr)
{
const unsigned long ec = ESR_ELx_EC(esr);

return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
}

static inline bool esr_is_cfi_brk(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
(esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
}

static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
esr = esr & ESR_ELx_FSC;

return (esr == ESR_ELx_FSC_FAULT_L(3)) ||
(esr == ESR_ELx_FSC_FAULT_L(2)) ||
(esr == ESR_ELx_FSC_FAULT_L(1)) ||
(esr == ESR_ELx_FSC_FAULT_L(0)) ||
(esr == ESR_ELx_FSC_FAULT_L(-1));
}

static inline bool esr_fsc_is_permission_fault(unsigned long esr)
{
esr = esr & ESR_ELx_FSC;

return (esr == ESR_ELx_FSC_PERM_L(3)) ||
(esr == ESR_ELx_FSC_PERM_L(2)) ||
(esr == ESR_ELx_FSC_PERM_L(1)) ||
(esr == ESR_ELx_FSC_PERM_L(0));
}

static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
{
esr = esr & ESR_ELx_FSC;

return (esr == ESR_ELx_FSC_ACCESS_L(3)) ||
(esr == ESR_ELx_FSC_ACCESS_L(2)) ||
(esr == ESR_ELx_FSC_ACCESS_L(1)) ||
(esr == ESR_ELx_FSC_ACCESS_L(0));
}

/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
static inline bool esr_iss_is_eretax(unsigned long esr)
{
return esr & ESR_ELx_ERET_ISS_ERET;
}

/* Indicate which key is used for ERETAx (false: A-Key, true: B-Key) */
static inline bool esr_iss_is_eretab(unsigned long esr)
{
return esr & ESR_ELx_ERET_ISS_ERETA;
}

const char *esr_get_class_string(unsigned long esr);
#endif /* __ASSEMBLY__ */

#endif /* __ASM_ESR_H */
@ -157,6 +157,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer_edge_cases
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/mmio_abort
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
@ -433,15 +433,15 @@ static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bp
vcpu_init_descriptor_tables(vcpu);

vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_BRK_INS, guest_sw_bp_handler);
ESR_ELx_EC_BRK64, guest_sw_bp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_HW_BP_CURRENT, guest_hw_bp_handler);
ESR_ELx_EC_BREAKPT_CUR, guest_hw_bp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_WP_CURRENT, guest_wp_handler);
ESR_ELx_EC_WATCHPT_CUR, guest_wp_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_SSTEP_CURRENT, guest_ss_handler);
ESR_ELx_EC_SOFTSTP_CUR, guest_ss_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_SVC64, guest_svc_handler);
ESR_ELx_EC_SVC64, guest_svc_handler);

/* Specify bpn/wpn/ctx_bpn to be tested */
vcpu_args_set(vcpu, 3, bpn, wpn, ctx_bpn);
159
tools/testing/selftests/kvm/aarch64/mmio_abort.c
Normal file
@ -0,0 +1,159 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mmio_abort - Tests for userspace MMIO abort injection
*
* Copyright (c) 2024 Google LLC
*/
#include "processor.h"
#include "test_util.h"

#define MMIO_ADDR 0x8000000ULL

static u64 expected_abort_pc;

static void expect_sea_handler(struct ex_regs *regs)
{
u64 esr = read_sysreg(esr_el1);

GUEST_ASSERT_EQ(regs->pc, expected_abort_pc);
GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR);
GUEST_ASSERT_EQ(esr & ESR_ELx_FSC_TYPE, ESR_ELx_FSC_EXTABT);

GUEST_DONE();
}

static void unexpected_dabt_handler(struct ex_regs *regs)
{
GUEST_FAIL("Unexpected data abort at PC: %lx\n", regs->pc);
}

static struct kvm_vm *vm_create_with_dabt_handler(struct kvm_vcpu **vcpu, void *guest_code,
handler_fn dabt_handler)
{
struct kvm_vm *vm = vm_create_with_one_vcpu(vcpu, guest_code);

vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(*vcpu);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_DABT_CUR, dabt_handler);

virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);

return vm;
}

static void vcpu_inject_extabt(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events = {};

events.exception.ext_dabt_pending = true;
vcpu_events_set(vcpu, &events);
}

static void vcpu_run_expect_done(struct kvm_vcpu *vcpu)
{
struct ucall uc;

vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
}

extern char test_mmio_abort_insn;

static void test_mmio_abort_guest(void)
{
WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_abort_insn);

asm volatile("test_mmio_abort_insn:\n\t"
"ldr x0, [%0]\n\t"
: : "r" (MMIO_ADDR) : "x0", "memory");

GUEST_FAIL("MMIO instruction should not retire");
}

/*
* Test that KVM doesn't complete MMIO emulation when userspace has made an
* external abort pending for the instruction.
*/
static void test_mmio_abort(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_abort_guest,
expect_sea_handler);
struct kvm_run *run = vcpu->run;

vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_MMIO);
TEST_ASSERT_EQ(run->mmio.phys_addr, MMIO_ADDR);
TEST_ASSERT_EQ(run->mmio.len, sizeof(unsigned long));
TEST_ASSERT(!run->mmio.is_write, "Expected MMIO read");

vcpu_inject_extabt(vcpu);
vcpu_run_expect_done(vcpu);
kvm_vm_free(vm);
}

extern char test_mmio_nisv_insn;

static void test_mmio_nisv_guest(void)
{
WRITE_ONCE(expected_abort_pc, (u64)&test_mmio_nisv_insn);

asm volatile("test_mmio_nisv_insn:\n\t"
"ldr x0, [%0], #8\n\t"
: : "r" (MMIO_ADDR) : "x0", "memory");

GUEST_FAIL("MMIO instruction should not retire");
}

/*
* Test that the KVM_RUN ioctl fails for ESR_EL2.ISV=0 MMIO aborts if userspace
* hasn't enabled KVM_CAP_ARM_NISV_TO_USER.
*/
static void test_mmio_nisv(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
unexpected_dabt_handler);

TEST_ASSERT(_vcpu_run(vcpu), "Expected nonzero return code from KVM_RUN");
TEST_ASSERT_EQ(errno, ENOSYS);

kvm_vm_free(vm);
}

/*
* Test that ESR_EL2.ISV=0 MMIO aborts reach userspace and that an injected SEA
* reaches the guest.
*/
static void test_mmio_nisv_abort(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_mmio_nisv_guest,
expect_sea_handler);
struct kvm_run *run = vcpu->run;

vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);

vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_ARM_NISV);
TEST_ASSERT_EQ(run->arm_nisv.fault_ipa, MMIO_ADDR);

vcpu_inject_extabt(vcpu);
vcpu_run_expect_done(vcpu);
kvm_vm_free(vm);
}

int main(void)
{
test_mmio_abort();
test_mmio_nisv();
test_mmio_nisv_abort();
}
@ -150,7 +150,7 @@ static void test_guest_no_gicv3(void)
vcpu_init_descriptor_tables(vcpu);

vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_UNKNOWN, guest_undef_handler);
ESR_ELx_EC_UNKNOWN, guest_undef_handler);

test_run_vcpu(vcpu);

@ -544,9 +544,9 @@ static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
vcpu_init_descriptor_tables(vcpu);

vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_DABT, no_dabt_handler);
ESR_ELx_EC_DABT_CUR, no_dabt_handler);
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_IABT, no_iabt_handler);
ESR_ELx_EC_IABT_CUR, no_iabt_handler);
}

static void setup_gva_maps(struct kvm_vm *vm)
@ -54,6 +54,15 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
return res.a0;
}

static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
{
struct arm_smccc_res res;

smccc_hvc(PSCI_1_3_FN64_SYSTEM_OFF2, type, cookie, 0, 0, 0, 0, 0, &res);

return res.a0;
}

static uint64_t psci_features(uint32_t func_id)
{
struct arm_smccc_res res;
@ -188,11 +197,94 @@ static void host_test_system_suspend(void)
kvm_vm_free(vm);
}

static void guest_test_system_off2(void)
{
uint64_t ret;

/* assert that SYSTEM_OFF2 is discoverable */
GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) &
PSCI_1_3_OFF_TYPE_HIBERNATE_OFF);
GUEST_ASSERT(psci_features(PSCI_1_3_FN64_SYSTEM_OFF2) &
PSCI_1_3_OFF_TYPE_HIBERNATE_OFF);

/* With non-zero 'cookie' field, it should fail */
ret = psci_system_off2(PSCI_1_3_OFF_TYPE_HIBERNATE_OFF, 1);
GUEST_ASSERT(ret == PSCI_RET_INVALID_PARAMS);

/*
* This would normally never return, so KVM sets the return value
* to PSCI_RET_INTERNAL_FAILURE. The test case *does* return, so
* that it can test both values for HIBERNATE_OFF.
*/
ret = psci_system_off2(PSCI_1_3_OFF_TYPE_HIBERNATE_OFF, 0);
GUEST_ASSERT(ret == PSCI_RET_INTERNAL_FAILURE);

/*
* Revision F.b of the PSCI v1.3 specification documents zero as an
* alias for HIBERNATE_OFF, since that's the value used in earlier
* revisions of the spec and some implementations in the field.
*/
ret = psci_system_off2(0, 1);
GUEST_ASSERT(ret == PSCI_RET_INVALID_PARAMS);

ret = psci_system_off2(0, 0);
GUEST_ASSERT(ret == PSCI_RET_INTERNAL_FAILURE);

GUEST_DONE();
}

static void host_test_system_off2(void)
{
struct kvm_vcpu *source, *target;
struct kvm_mp_state mps;
uint64_t psci_version = 0;
int nr_shutdowns = 0;
struct kvm_run *run;
struct ucall uc;

setup_vm(guest_test_system_off2, &source, &target);

vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION, &psci_version);

TEST_ASSERT(psci_version >= PSCI_VERSION(1, 3),
"Unexpected PSCI version %lu.%lu",
PSCI_VERSION_MAJOR(psci_version),
PSCI_VERSION_MINOR(psci_version));

vcpu_power_off(target);
run = source->run;

enter_guest(source);
while (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SHUTDOWN,
"Unhandled system event: %u (expected: %u)",
run->system_event.type, KVM_SYSTEM_EVENT_SHUTDOWN);
TEST_ASSERT(run->system_event.ndata >= 1,
"Unexpected amount of system event data: %u (expected, >= 1)",
run->system_event.ndata);
TEST_ASSERT(run->system_event.data[0] & KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2,
"PSCI_OFF2 flag not set. Flags %llu (expected %llu)",
run->system_event.data[0], KVM_SYSTEM_EVENT_SHUTDOWN_FLAG_PSCI_OFF2);

nr_shutdowns++;

/* Restart the vCPU */
mps.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu_mp_state_set(source, &mps);

enter_guest(source);
}

TEST_ASSERT(get_ucall(source, &uc) == UCALL_DONE, "Guest did not exit cleanly");
TEST_ASSERT(nr_shutdowns == 2, "Two shutdown events were expected, but saw %d", nr_shutdowns);
}

int main(void)
{
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));

host_test_cpu_on();
host_test_system_suspend();
host_test_system_off2();
return 0;
}
@ -443,6 +443,101 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
}
}

#define MPAM_IDREG_TEST 6
static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
{
uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
struct reg_mask_range range = {
.addr = (__u64)masks,
};
uint64_t val;
int idx, err;

/*
* If ID_AA64PFR0.MPAM is _not_ officially modifiable and is zero,
* check that if it can be set to 1, (i.e. it is supported by the
* hardware), that it can't be set to other values.
*/

/* Get writable masks for feature ID registers */
memset(range.reserved, 0, sizeof(range.reserved));
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

/* Writeable? Nothing to test! */
idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
return;
}

/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);

/* Try to set MPAM=0. This should always be possible. */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");

/* Try to set MPAM=1 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");

/* Try to set MPAM=2 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
else
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");

/* And again for ID_AA64PFR1_EL1.MPAM_frac */
idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
return;
}

/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), &val);

/* Try to set MPAM_frac=0. This should always be possible. */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM_frac=0 was not accepted\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=0 worked\n");

/* Try to set MPAM_frac=1 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=1 was writable\n");

/* Try to set MPAM_frac=2 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
else
ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
}

static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
bool done = false;
@ -581,12 +676,14 @@ int main(void)
ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2;
ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
MPAM_IDREG_TEST;

ksft_set_plan(test_cnt);

test_vm_ftr_id_regs(vcpu, aarch64_only);
test_vcpu_ftr_id_regs(vcpu);
test_user_set_mpam_reg(vcpu);

test_guest_reg_read(vcpu);

@ -300,7 +300,7 @@ static void guest_sync_handler(struct ex_regs *regs)
uint64_t esr, ec;

esr = read_sysreg(esr_el1);
ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
ec = ESR_ELx_EC(esr);

__GUEST_ASSERT(expected_ec == ec,
"PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
@ -338,10 +338,10 @@ static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
* Reading/writing the event count/type registers should cause
* an UNDEFINED exception.
*/
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_cntr(pmc_idx));
TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_typer(pmc_idx));
TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
/*
* The bit corresponding to the (unimplemented) counter in
* {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
@ -425,7 +425,7 @@ static void create_vpmu_vm(void *guest_code)

vpmu_vm.vm = vm_create(1);
vm_init_descriptor_tables(vpmu_vm.vm);
for (ec = 0; ec < ESR_EC_NUM; ec++) {
for (ec = 0; ec < ESR_ELx_EC_MAX + 1; ec++) {
vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
guest_sync_handler);
}
@ -12,6 +12,8 @@

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/sysreg.h>

@ -100,19 +102,6 @@ enum {
(v) == VECTOR_SYNC_LOWER_64 || \
(v) == VECTOR_SYNC_LOWER_32)

#define ESR_EC_NUM 64
#define ESR_EC_SHIFT 26
#define ESR_EC_MASK (ESR_EC_NUM - 1)

#define ESR_EC_UNKNOWN 0x0
#define ESR_EC_SVC64 0x15
#define ESR_EC_IABT 0x21
#define ESR_EC_DABT 0x25
#define ESR_EC_HW_BP_CURRENT 0x31
#define ESR_EC_SSTEP_CURRENT 0x33
#define ESR_EC_WP_CURRENT 0x35
#define ESR_EC_BRK_INS 0x3c

/* Access flag */
#define PTE_AF (1ULL << 10)

@ -450,7 +450,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
}

struct handlers {
handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
handler_fn exception_handlers[VECTOR_NUM][ESR_ELx_EC_MAX + 1];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
@ -469,7 +469,7 @@ void route_exception(struct ex_regs *regs, int vector)
switch (vector) {
case VECTOR_SYNC_CURRENT:
case VECTOR_SYNC_LOWER_64:
ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
ec = ESR_ELx_EC(read_sysreg(esr_el1));
valid_ec = true;
break;
case VECTOR_IRQ_CURRENT:
@ -508,7 +508,7 @@ void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,

assert(VECTOR_IS_SYNC(vector));
assert(vector < VECTOR_NUM);
assert(ec < ESR_EC_NUM);
assert(ec <= ESR_ELx_EC_MAX);
handlers->exception_handlers[vector][ec] = handler;
}

@ -720,9 +720,6 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
rb_erase(&region->hva_node, &vm->regions.hva_tree);
hash_del(&region->slot_node);

region->region.memory_size = 0;
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

sparsebit_free(&region->unused_phy_pages);
sparsebit_free(&region->protected_phy_pages);
ret = munmap(region->mmap_start, region->mmap_size);
@ -1197,7 +1194,12 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
*/
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
__vm_mem_region_delete(vm, memslot2region(vm, slot));
struct userspace_mem_region *region = memslot2region(vm, slot);

region->region.memory_size = 0;
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

__vm_mem_region_delete(vm, region);
}

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,