Merge branch kvm-arm64/mpam-ni into kvmarm/next

* kvm-arm64/mpam-ni:
  : Hiding FEAT_MPAM from KVM guests, courtesy of James Morse + Joey Gouly
  :
  : Fix a longstanding bug where FEAT_MPAM was accidentally exposed to KVM
  : guests + the EL2 trap configuration was not explicitly configured. As
  : part of this, bring in skeletal support for initialising the MPAM CPU
  : context so KVM can actually set traps for its guests.
  :
  : Be warned -- if this series leads to boot failures on your system,
  : you're running on turd firmware.
  :
  : As an added bonus (that builds upon the infrastructure added by the MPAM
  : series), allow userspace to configure CTR_EL0.L1Ip, courtesy of Shameer
  : Kolothum.
  KVM: arm64: Make L1Ip feature in CTR_EL0 writable from userspace
  KVM: arm64: selftests: Test ID_AA64PFR0.MPAM isn't completely ignored
  KVM: arm64: Disable MPAM visibility by default and ignore VMM writes
  KVM: arm64: Add a macro for creating filtered sys_reg_descs entries
  KVM: arm64: Fix missing traps of guest accesses to the MPAM registers
  arm64: cpufeature: discover CPU support for MPAM
  arm64: head.S: Initialise MPAM EL2 registers and disable traps
  arm64/sysreg: Convert existing MPAM sysregs and add the remaining entries

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 24bb181136 (Oliver Upton, 2024-11-11 18:38:30 +00:00)
14 changed files with 557 additions and 46 deletions
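Background for the CTR_EL0.L1Ip change below: userspace drives the field through KVM's ONE_REG interface. A minimal sketch of a VMM downgrading L1Ip from PIPT to VIPT, assuming an already-created vCPU fd, with error handling elided (ARM64_SYS_REG() comes from the arm64 KVM UAPI headers):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h for ARM64_SYS_REG() */

#define CTR_EL0_REG	ARM64_SYS_REG(3, 3, 0, 0, 1)	/* CTR_EL0 */
#define CTR_L1IP_MASK	(UINT64_C(3) << 14)		/* L1Ip, bits [15:14] */
#define CTR_L1IP_VIPT	(UINT64_C(2) << 14)		/* 0b10 */

static int ctr_el0_downgrade_to_vipt(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = CTR_EL0_REG,
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;
	/* PIPT -> VIPT is the only downgrade the new handler accepts */
	val = (val & ~CTR_L1IP_MASK) | CTR_L1IP_VIPT;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}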


@@ -152,6 +152,8 @@ infrastructure:
+------------------------------+---------+---------+
| DIT | [51-48] | y |
+------------------------------+---------+---------+
| MPAM | [43-40] | n |
+------------------------------+---------+---------+
| SVE | [35-32] | y |
+------------------------------+---------+---------+
| GIC | [27-24] | n |


@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
u64 reg_revidr;
u64 reg_gmid;
u64 reg_smidr;
u64 reg_mpamidr;
u64 reg_id_aa64dfr0;
u64 reg_id_aa64dfr1;


@@ -60,6 +60,11 @@ cpucap_is_possible(const unsigned int cap)
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
case ARM64_MPAM:
/*
* KVM MPAM support doesn't rely on the host kernel supporting MPAM.
*/
return true;
}
return true;


@@ -612,6 +612,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1)
return val > 0;
}
static inline bool id_aa64pfr0_mpam(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT);
return val > 0;
}
static inline bool id_aa64pfr1_mte(u64 pfr1)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
@@ -838,6 +845,16 @@ static inline bool system_supports_poe(void)
alternative_has_cap_unlikely(ARM64_HAS_S1POE);
}
static __always_inline bool system_supports_mpam(void)
{
return alternative_has_cap_unlikely(ARM64_MPAM);
}
static __always_inline bool system_supports_mpam_hcr(void)
{
return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
}
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);


@@ -220,6 +220,19 @@
msr spsr_el2, x0
.endm
.macro __init_el2_mpam
/* Memory Partitioning And Monitoring: disable EL2 traps */
mrs x1, id_aa64pfr0_el1
ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
cbz x0, .Lskip_mpam_\@ // skip if no MPAM
msr_s SYS_MPAM2_EL2, xzr // use the default partition
// and disable lower traps
mrs_s x0, SYS_MPAMIDR_EL1
tbz x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@ // skip if no MPAMHCR reg
msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2
.Lskip_mpam_\@:
.endm
/**
* Initialize EL2 registers to sane values. This should be called early on all
* cores that were booted in EL2. Note that everything gets initialised as
@@ -237,6 +250,7 @@
__init_el2_stage2
__init_el2_gicv3
__init_el2_hstr
__init_el2_mpam
__init_el2_nvhe_idregs
__init_el2_cptr
__init_el2_fgt
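For readers more comfortable in C, the new macro's logic transliterates roughly as follows (a pseudocode sketch only -- early EL2 boot code cannot actually be written in C; the helper names mirror the kernel's):

/* C-flavoured sketch of __init_el2_mpam */
static void init_el2_mpam(void)
{
	u64 pfr0 = read_sysreg(id_aa64pfr0_el1);

	/* Nothing to do if this CPU doesn't implement MPAM */
	if (!((pfr0 >> ID_AA64PFR0_EL1_MPAM_SHIFT) & 0xf))
		return;

	/* Use the default partition and disable lower-EL traps */
	write_sysreg_s(0, SYS_MPAM2_EL2);

	/* MPAMHCR_EL2 only exists when MPAMIDR_EL1.HAS_HCR is set */
	if (read_sysreg_s(SYS_MPAMIDR_EL1) & MPAMIDR_EL1_HAS_HCR)
		write_sysreg_s(0, SYS_MPAMHCR_EL2);	/* clears TRAP_MPAMIDR_EL1 */
}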


@@ -103,6 +103,7 @@
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0
/* TCR_EL2 Registers bits */
#define TCR_EL2_DS (1UL << 32)


@@ -542,18 +542,6 @@
#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0)
#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0)
#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0)
#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1)
#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0)
#define __SYS__MPAMVPMx_EL2(x) sys_reg(3, 4, 10, 6, x)
#define SYS_MPAMVPM0_EL2 __SYS__MPAMVPMx_EL2(0)
#define SYS_MPAMVPM1_EL2 __SYS__MPAMVPMx_EL2(1)
#define SYS_MPAMVPM2_EL2 __SYS__MPAMVPMx_EL2(2)
#define SYS_MPAMVPM3_EL2 __SYS__MPAMVPMx_EL2(3)
#define SYS_MPAMVPM4_EL2 __SYS__MPAMVPMx_EL2(4)
#define SYS_MPAMVPM5_EL2 __SYS__MPAMVPMx_EL2(5)
#define SYS_MPAMVPM6_EL2 __SYS__MPAMVPMx_EL2(6)
#define SYS_MPAMVPM7_EL2 __SYS__MPAMVPMx_EL2(7)
#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0)
#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1)


@@ -684,6 +684,14 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mpamidr[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
ARM64_FTR_END,
};
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -804,6 +812,9 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
/* Op1 = 0, CRn = 10, CRm = 4 */
ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),
/* Op1 = 1, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
@@ -1163,6 +1174,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
cpacr_restore(cpacr);
}
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
}
@@ -1419,6 +1433,11 @@ void update_cpu_features(int cpu,
cpacr_restore(cpacr);
}
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
info->reg_mpamidr, boot->reg_mpamidr);
}
/*
* The kernel uses the LDGM/STGM instructions and the number of tags
* they read/write depends on the GMID_EL1.BS field. Check that the
@@ -2377,6 +2396,36 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
}
static bool
test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
{
if (!has_cpuid_feature(entry, scope))
return false;
/* Check firmware actually enabled MPAM on this cpu. */
return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN);
}
static void
cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
{
/*
* Access by the kernel (at EL1) should use the reserved PARTID
* which is configured unrestricted. This avoids priority inversion,
* where a latency-sensitive task has to wait for a throttled task
* to release a lock.
*/
write_sysreg_s(0, SYS_MPAM1_EL1);
}
static bool
test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
return idr & MPAMIDR_EL1_HAS_HCR;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.capability = ARM64_ALWAYS_BOOT,
@@ -2873,6 +2922,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#endif
},
#endif
{
.desc = "Memory Partitioning And Monitoring",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_MPAM,
.matches = test_has_mpam,
.cpu_enable = cpu_enable_mpam,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1)
},
{
.desc = "Memory Partitioning And Monitoring Virtualisation",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_MPAM_HCR,
.matches = test_has_mpam_hcr,
},
{
.desc = "NV1",
.capability = ARM64_HAS_HCR_NV1,
@@ -3396,6 +3459,36 @@ static void verify_hyp_capabilities(void)
}
}
static void verify_mpam_capabilities(void)
{
u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1);
u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max;
if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) !=
FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) {
pr_crit("CPU%d: MPAM version mismatch\n", smp_processor_id());
cpu_die_early();
}
cpu_idr = read_cpuid(MPAMIDR_EL1);
sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) !=
FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) {
pr_crit("CPU%d: Missing MPAM HCR\n", smp_processor_id());
cpu_die_early();
}
cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr);
cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr);
sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr);
sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr);
if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) {
pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n", smp_processor_id());
cpu_die_early();
}
}
/*
* Run through the enabled system capabilities and enable() it on this CPU.
* The capabilities were decided based on the available CPUs at the boot time.
@@ -3422,6 +3515,9 @@ static void verify_local_cpu_capabilities(void)
if (is_hyp_mode_available())
verify_hyp_capabilities();
if (system_supports_mpam())
verify_mpam_capabilities();
}
void check_local_cpu_capabilities(void)


@@ -478,6 +478,9 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
__cpuinfo_store_cpu_32bit(&info->aarch32);
if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
cpuinfo_detect_icache_policy(info);
}


@@ -204,6 +204,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
u64 r = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;
if (!system_supports_mpam())
return;
/* trap guest access to MPAMIDR_EL1 */
if (system_supports_mpam_hcr()) {
write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
} else {
/* From MPAM v1.1, TIDR can trap MPAMIDR; set it unconditionally */
r |= MPAM2_EL2_TIDR;
}
write_sysreg_s(r, SYS_MPAM2_EL2);
}
static inline void __deactivate_traps_mpam(void)
{
if (!system_supports_mpam())
return;
write_sysreg_s(0, SYS_MPAM2_EL2);
if (system_supports_mpam_hcr())
write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
@@ -244,6 +273,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
}
__activate_traps_hfgxtr(vcpu);
__activate_traps_mpam(vcpu);
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -263,6 +293,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
__deactivate_traps_hfgxtr(vcpu);
__deactivate_traps_mpam();
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)


@@ -1549,6 +1549,9 @@ static u8 pmuver_to_perfmon(u8 pmuver)
}
}
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
@@ -1562,6 +1565,12 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val = read_sanitised_ftr_reg(id);
switch (id) {
case SYS_ID_AA64DFR0_EL1:
val = sanitise_id_aa64dfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR0_EL1:
val = sanitise_id_aa64pfr0_el1(vcpu, val);
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1575,6 +1584,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
break;
case SYS_ID_AA64PFR2_EL1:
/* We only expose FPMR */
@@ -1732,11 +1742,8 @@ static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
if (!vcpu_has_sve(vcpu))
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
@@ -1764,6 +1771,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
/*
* MPAM is disabled by default, as KVM would also need a set of PARTIDs
* to program the MPAMVPMx_EL2 PARTID remapping registers with. But some
* older kernels let the guest see the ID bit.
*/
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
return val;
}
@@ -1777,11 +1791,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
(val); \
})
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
/*
@@ -1874,6 +1885,70 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
return set_id_reg(vcpu, rd, val);
}
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
/*
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
* guests, but didn't add trap handling. KVM doesn't support MPAM and
* always returns an UNDEF for these registers. The guest must see 0
* for this field.
*
* But KVM must also accept values from user-space that were provided
* by KVM. On CPUs that support MPAM, permit user-space to write
* the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
*/
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
return set_id_reg(vcpu, rd, user_val);
}
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
/* See set_id_aa64pfr0_el1 for comment about MPAM */
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
return set_id_reg(vcpu, rd, user_val);
}
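The comment above describes the save/restore pattern these two handlers exist to keep working. From the VMM side it looks roughly like this (a sketch, assuming a vCPU fd and the same UAPI headers as earlier; ID_AA64PFR0_EL1 is Op0=3, Op1=0, CRn=0, CRm=4, Op2=0):

static void migrate_pfr0(int vcpu_fd)
{
	uint64_t pfr0;
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
		.addr = (uint64_t)&pfr0,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* an old kernel may report MPAM=1 */
	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* the write-back must not fail */
}

When the written value matches what the hardware reports, the MPAM field is silently cleared before being stored, so the restore succeeds while the guest continues to see MPAM=0.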
static int set_ctr_el0(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
/*
* Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
* Hence only allow VIPT (0b10) or PIPT (0b11) to be set for L1Ip,
* based on what the hardware reports.
*
* Using a VIPT software model on PIPT hardware leads to
* over-invalidation, but is still correct. Hence we allow downgrading
* PIPT to VIPT, but not the other way around. This is handled via
* arm64_ftr_safe_value(): the CTR_EL0 ftr_bits describe the L1Ip
* field as FTR_EXACT with a safe value of VIPT.
*/
switch (user_L1Ip) {
case CTR_EL0_L1Ip_RESERVED_VPIPT:
case CTR_EL0_L1Ip_RESERVED_AIVIVT:
return -EINVAL;
case CTR_EL0_L1Ip_VIPT:
case CTR_EL0_L1Ip_PIPT:
return set_id_reg(vcpu, rd, user_val);
default:
return -ENOENT;
}
}
/*
* cpufeature ID register user accessors
*
@@ -2199,6 +2274,15 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
.val = mask, \
}
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
#define ID_FILTERED(sysreg, name, mask) { \
ID_DESC(sysreg), \
.set_user = set_##name, \
.visibility = id_visibility, \
.reset = kvm_read_sanitised_id_reg, \
.val = (mask), \
}
/*
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
@@ -2461,18 +2545,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_reg,
.reset = read_sanitised_id_aa64pfr0_el1,
.val = ~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP)),
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
~(ID_AA64PFR1_EL1_PFAR |
ID_AA64PFR1_EL1_DF2 |
ID_AA64PFR1_EL1_MTEX |
ID_AA64PFR1_EL1_THE |
@@ -2493,11 +2574,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
/* CRm=5 */
{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_aa64dfr0_el1,
.reset = read_sanitised_id_aa64dfr0_el1,
/*
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
@@ -2510,10 +2586,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
* of breakpoints for more details.
*/
.val = ID_AA64DFR0_EL1_DoubleLock_MASK |
ID_AA64DFR0_EL1_WRPs_MASK |
ID_AA64DFR0_EL1_PMUVer_MASK |
ID_AA64DFR0_EL1_DebugVer_MASK, },
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
ID_AA64DFR0_EL1_DoubleLock_MASK |
ID_AA64DFR0_EL1_WRPs_MASK |
ID_AA64DFR0_EL1_PMUVer_MASK |
ID_AA64DFR0_EL1_DebugVer_MASK),
ID_SANITISED(ID_AA64DFR1_EL1),
ID_UNALLOCATED(5,2),
ID_UNALLOCATED(5,3),
@@ -2643,8 +2720,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
@@ -2689,10 +2769,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
CTR_EL0_IDC_MASK |
CTR_EL0_DminLine_MASK |
CTR_EL0_IminLine_MASK),
ID_FILTERED(CTR_EL0, ctr_el0,
CTR_EL0_DIC_MASK |
CTR_EL0_IDC_MASK |
CTR_EL0_DminLine_MASK |
CTR_EL0_L1Ip_MASK |
CTR_EL0_IminLine_MASK),
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
@@ -2952,6 +3034,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
s1poe_el2_visibility),
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),


@@ -60,6 +60,8 @@ HW_DBM
KVM_HVHE
KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
MPAM
MPAM_HCR
MTE
MTE_ASYMM
SME


@@ -2738,6 +2738,126 @@ Field 1 E2SPE
Field 0 E0HSPE
EndSysreg
Sysreg MPAMHCR_EL2 3 4 10 4 0
Res0 63:32
Field 31 TRAP_MPAMIDR_EL1
Res0 30:9
Field 8 GSTAPP_PLK
Res0 7:2
Field 1 EL1_VPMEN
Field 0 EL0_VPMEN
EndSysreg
Sysreg MPAMVPMV_EL2 3 4 10 4 1
Res0 63:32
Field 31 VPM_V31
Field 30 VPM_V30
Field 29 VPM_V29
Field 28 VPM_V28
Field 27 VPM_V27
Field 26 VPM_V26
Field 25 VPM_V25
Field 24 VPM_V24
Field 23 VPM_V23
Field 22 VPM_V22
Field 21 VPM_V21
Field 20 VPM_V20
Field 19 VPM_V19
Field 18 VPM_V18
Field 17 VPM_V17
Field 16 VPM_V16
Field 15 VPM_V15
Field 14 VPM_V14
Field 13 VPM_V13
Field 12 VPM_V12
Field 11 VPM_V11
Field 10 VPM_V10
Field 9 VPM_V9
Field 8 VPM_V8
Field 7 VPM_V7
Field 6 VPM_V6
Field 5 VPM_V5
Field 4 VPM_V4
Field 3 VPM_V3
Field 2 VPM_V2
Field 1 VPM_V1
Field 0 VPM_V0
EndSysreg
Sysreg MPAM2_EL2 3 4 10 5 0
Field 63 MPAMEN
Res0 62:59
Field 58 TIDR
Res0 57
Field 56 ALTSP_HFC
Field 55 ALTSP_EL2
Field 54 ALTSP_FRCD
Res0 53:51
Field 50 EnMPAMSM
Field 49 TRAPMPAM0EL1
Field 48 TRAPMPAM1EL1
Field 47:40 PMG_D
Field 39:32 PMG_I
Field 31:16 PARTID_D
Field 15:0 PARTID_I
EndSysreg
Sysreg MPAMVPM0_EL2 3 4 10 6 0
Field 63:48 PhyPARTID3
Field 47:32 PhyPARTID2
Field 31:16 PhyPARTID1
Field 15:0 PhyPARTID0
EndSysreg
Sysreg MPAMVPM1_EL2 3 4 10 6 1
Field 63:48 PhyPARTID7
Field 47:32 PhyPARTID6
Field 31:16 PhyPARTID5
Field 15:0 PhyPARTID4
EndSysreg
Sysreg MPAMVPM2_EL2 3 4 10 6 2
Field 63:48 PhyPARTID11
Field 47:32 PhyPARTID10
Field 31:16 PhyPARTID9
Field 15:0 PhyPARTID8
EndSysreg
Sysreg MPAMVPM3_EL2 3 4 10 6 3
Field 63:48 PhyPARTID15
Field 47:32 PhyPARTID14
Field 31:16 PhyPARTID13
Field 15:0 PhyPARTID12
EndSysreg
Sysreg MPAMVPM4_EL2 3 4 10 6 4
Field 63:48 PhyPARTID19
Field 47:32 PhyPARTID18
Field 31:16 PhyPARTID17
Field 15:0 PhyPARTID16
EndSysreg
Sysreg MPAMVPM5_EL2 3 4 10 6 5
Field 63:48 PhyPARTID23
Field 47:32 PhyPARTID22
Field 31:16 PhyPARTID21
Field 15:0 PhyPARTID20
EndSysreg
Sysreg MPAMVPM6_EL2 3 4 10 6 6
Field 63:48 PhyPARTID27
Field 47:32 PhyPARTID26
Field 31:16 PhyPARTID25
Field 15:0 PhyPARTID24
EndSysreg
Sysreg MPAMVPM7_EL2 3 4 10 6 7
Field 63:48 PhyPARTID31
Field 47:32 PhyPARTID30
Field 31:16 PhyPARTID29
Field 15:0 PhyPARTID28
EndSysreg
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
@@ -2770,6 +2890,10 @@ Sysreg FAR_EL12 3 5 6 0 0
Field 63:0 ADDR
EndSysreg
Sysreg MPAM1_EL12 3 5 10 5 0
Fields MPAM1_ELx
EndSysreg
Sysreg CONTEXTIDR_EL12 3 5 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
@@ -2949,6 +3073,22 @@ Res0 1
Field 0 EN
EndSysreg
Sysreg MPAMIDR_EL1 3 0 10 4 4
Res0 63:62
Field 61 HAS_SDEFLT
Field 60 HAS_FORCE_NS
Field 59 SP4
Field 58 HAS_TIDR
Field 57 HAS_ALTSP
Res0 56:40
Field 39:32 PMG_MAX
Res0 31:21
Field 20:18 VPMR_MAX
Field 17 HAS_HCR
Res0 16
Field 15:0 PARTID_MAX
EndSysreg
Sysreg LORID_EL1 3 0 10 4 7
Res0 63:24
Field 23:16 LD
@@ -2956,6 +3096,27 @@ Res0 15:8
Field 7:0 LR
EndSysreg
Sysreg MPAM1_EL1 3 0 10 5 0
Field 63 MPAMEN
Res0 62:61
Field 60 FORCED_NS
Res0 59:55
Field 54 ALTSP_FRCD
Res0 53:48
Field 47:40 PMG_D
Field 39:32 PMG_I
Field 31:16 PARTID_D
Field 15:0 PARTID_I
EndSysreg
Sysreg MPAM0_EL1 3 0 10 5 1
Res0 63:48
Field 47:40 PMG_D
Field 39:32 PMG_I
Field 31:16 PARTID_D
Field 15:0 PARTID_I
EndSysreg
Sysreg ISR_EL1 3 0 12 1 0
Res0 63:11
Field 10 IS


@@ -443,6 +443,101 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
}
}
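/* Number of ksft results reported by test_user_set_mpam_reg() */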
#define MPAM_IDREG_TEST 6
static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
{
uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
struct reg_mask_range range = {
.addr = (__u64)masks,
};
uint64_t val;
int idx, err;
/*
* If ID_AA64PFR0.MPAM is _not_ officially modifiable, check that a
* write of 0 is always accepted, that a write of 1 is accepted when
* the hardware supports MPAM, and that any other value is rejected.
*/
/* Get writable masks for feature ID registers */
memset(range.reserved, 0, sizeof(range.reserved));
vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
/* Writeable? Nothing to test! */
idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
return;
}
/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
/* Try to set MPAM=0. This should always be possible. */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");
/* Try to set MPAM=1 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");
/* Try to set MPAM=2 */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
if (err)
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
else
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");
/* And again for ID_AA64PFR1_EL1.MPAM_frac */
idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
return;
}
/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), &val);
/* Try to set MPAM_frac=0. This should always be possible. */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM_frac=0 was not accepted\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=0 worked\n");
/* Try to set MPAM_frac=1 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
else
ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM_frac=1 was writable\n");
/* Try to set MPAM_frac=2 */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
if (err)
ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
else
ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
}
static void test_guest_reg_read(struct kvm_vcpu *vcpu)
{
bool done = false;
@@ -581,12 +676,14 @@ int main(void)
ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2;
ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
MPAM_IDREG_TEST;
ksft_set_plan(test_cnt);
test_vm_ftr_id_regs(vcpu, aarch64_only);
test_vcpu_ftr_id_regs(vcpu);
test_user_set_mpam_reg(vcpu);
test_guest_reg_read(vcpu);