KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
Add a helper to read a vCPU's PMCR_EL0, and use it whenever KVM reads a
vCPU's PMCR_EL0.

Currently, the PMCR_EL0 value is tracked per vCPU. The following patches
will make (only) PMCR_EL0.N track per guest. Having the new helper will
be useful to combine the PMCR_EL0.N field (tracked per guest) and the
other fields (tracked per vCPU) to provide the value of PMCR_EL0.

No functional change intended.

Reviewed-by: Sebastian Ott <sebott@redhat.com>
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231020214053.2144305-4-rananta@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Parent: 4277335797
Commit: 57fc267f1b
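As context for the follow-up series mentioned in the commit message: once PMCR_EL0.N is tracked per guest, the new helper is the natural place to fold that per-guest field into the per-vCPU register value. The sketch below is not part of this patch; the kvm->arch.pmcr_n field is hypothetical, and the code only illustrates, using the ARMV8_PMU_PMCR_N_* macros already used elsewhere in pmu-emul.c, how such a combination could look.

/*
 * Illustrative sketch only (not in this patch): a possible future
 * kvm_vcpu_read_pmcr() that substitutes a hypothetical per-guest
 * counter count (kvm->arch.pmcr_n, an assumed field) into the
 * per-vCPU PMCR_EL0 value.
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);

	/* Clear the N field tracked per vCPU ... */
	pmcr &= ~((u64)ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	/* ... and substitute the per-guest value. */
	pmcr |= ((u64)vcpu->kvm->arch.pmcr_n & ARMV8_PMU_PMCR_N_MASK) <<
		ARMV8_PMU_PMCR_N_SHIFT;

	return pmcr;
}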
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -801,8 +801,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		}
 
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-			kvm_pmu_handle_pmcr(vcpu,
-					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+			kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
 
 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
 			kvm_vcpu_pmu_restore_guest(vcpu);
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -72,7 +72,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -250,7 +250,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
 	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
@@ -272,7 +272,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -324,7 +324,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -426,7 +426,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
 	int i;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
 		return;
 
 	/* Weed out disabled counters */
@@ -569,7 +569,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
@@ -1084,3 +1084,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
 				      ID_AA64DFR0_EL1_PMUVer_V3P5);
 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return __vcpu_sys_reg(vcpu, PMCR_EL0);
+}
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -822,7 +822,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Only update writeable bits of PMCR (continuing into
 		 * kvm_pmu_handle_pmcr() as well)
 		 */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+		val = kvm_vcpu_read_pmcr(vcpu);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 		if (!kvm_supports_32bit_el0())
@@ -830,7 +830,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_handle_pmcr(vcpu, val);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+		val = kvm_vcpu_read_pmcr(vcpu)
 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
 		p->regval = val;
 	}
@@ -879,7 +879,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
 	u64 pmcr, val;
 
-	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+	pmcr = kvm_vcpu_read_pmcr(vcpu);
 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -103,6 +103,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 int kvm_arm_set_default_pmu(struct kvm *kvm);
 
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -180,6 +181,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
 	return -ENODEV;
 }
 
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 #endif
 
 #endif