KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU
The number of PMU event counters is indicated in PMCR_EL0.N. For a
vCPU with PMUv3 configured, its value is set to the same value as the
current PE on every vCPU reset. Unless the vCPU is pinned to PEs that
have the PMU associated with the guest from the initial vCPU reset,
the value might be different from the PMU's PMCR_EL0.N on
heterogeneous PMU systems.

Fix this by setting the vCPU's PMCR_EL0.N to the PMU's PMCR_EL0.N
value. Track PMCR_EL0.N per guest, as only one PMU can be set for the
guest (PMCR_EL0.N must be the same for all vCPUs of the guest), and it
is convenient for updating the value.

To achieve this, the patch introduces a helper,
kvm_arm_pmu_get_max_counters(), that reads the maximum number of
counters from the arm_pmu associated with the VM. Make the function
global, as upcoming patches will need it when setting the guest's
PMCR.N from userspace.

KVM does not yet support userspace modifying PMCR_EL0.N. The following
patch will add support for that.

Reviewed-by: Sebastian Ott <sebott@redhat.com>
Co-developed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Link: https://lore.kernel.org/r/20231020214053.2144305-5-rananta@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 57fc267f1b
commit 4d20debf9c
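Before the diff, a minimal user-space sketch (not the kernel code itself) of the bit manipulation this patch applies when the guest's PMCR_EL0 is read: the stored N field (bits [15:11]) is masked out and the per-guest pmcr_n value is inserted. The shift and mask values mirror the kernel's ARMV8_PMU_PMCR_N_SHIFT and ARMV8_PMU_PMCR_N_MASK; the counter counts in main() are made-up examples.

/* Standalone illustration only; mirrors the fold-in done by kvm_vcpu_read_pmcr(). */
#include <stdint.h>
#include <stdio.h>

#define PMCR_N_SHIFT	11	/* PMCR_EL0.N is bits [15:11] */
#define PMCR_N_MASK	0x1f

static uint64_t fold_in_pmcr_n(uint64_t stored_pmcr, uint8_t guest_n)
{
	/* Drop whatever N the stored value carries, insert the per-guest one. */
	uint64_t pmcr = stored_pmcr & ~((uint64_t)PMCR_N_MASK << PMCR_N_SHIFT);

	return pmcr | ((uint64_t)guest_n << PMCR_N_SHIFT);
}

int main(void)
{
	/* Made-up values: the reset PE advertised 8 counters, the guest's PMU has 6. */
	uint64_t stored = (uint64_t)8 << PMCR_N_SHIFT;
	uint64_t guest_view = fold_in_pmcr_n(stored, 6);

	printf("guest sees PMCR_EL0.N = %llu\n",
	       (unsigned long long)((guest_view >> PMCR_N_SHIFT) & PMCR_N_MASK));
	return 0;
}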
arch/arm64/include/asm/kvm_host.h
@@ -257,6 +257,9 @@ struct kvm_arch {
 
 	cpumask_var_t supported_cpus;
 
+	/* PMCR_EL0.N value for the guest */
+	u8 pmcr_n;
+
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 	struct maple_tree smccc_filter;
arch/arm64/kvm/pmu-emul.c
@@ -873,11 +873,27 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
+/**
+ * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
+ * @kvm: The kvm pointer
+ */
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
+
+	/*
+	 * The arm_pmu->num_events considers the cycle counter as well.
+	 * Ignore that and return only the general-purpose counters.
+	 */
+	return arm_pmu->num_events - 1;
+}
+
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	lockdep_assert_held(&kvm->arch.config_lock);
+
+	kvm->arch.arm_pmu = arm_pmu;
+	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+}
+
 /**
@@ -1091,5 +1107,8 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
  */
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 {
-	return __vcpu_sys_reg(vcpu, PMCR_EL0);
+	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
+			~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+
+	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
 }
arch/arm64/kvm/sys_regs.c
@@ -721,12 +721,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
 
-	/* No PMU available, any PMU reg may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
-	n &= ARMV8_PMU_PMCR_N_MASK;
+	n = vcpu->kvm->arch.pmcr_n;
 	if (n)
 		mask |= GENMASK(n - 1, 0);
 
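The reset_pmu_reg() change above derives the reset mask for the counter registers from the per-guest pmcr_n instead of the host's PMCR_EL0. A small sketch of the mask it builds, assuming a hypothetical pmcr_n of 6: bits 0..5 for the event counters plus bit 31 for the cycle counter, matching BIT(ARMV8_PMU_CYCLE_IDX) | GENMASK(n - 1, 0).

/* Illustration only; CYCLE_IDX mirrors ARMV8_PMU_CYCLE_IDX (31). */
#include <stdint.h>
#include <stdio.h>

#define CYCLE_IDX 31

static uint64_t counter_mask(uint8_t n)
{
	uint64_t mask = 1ULL << CYCLE_IDX;	/* BIT(ARMV8_PMU_CYCLE_IDX) */

	if (n)
		mask |= (1ULL << n) - 1;	/* GENMASK(n - 1, 0) */
	return mask;
}

int main(void)
{
	/* With a hypothetical pmcr_n of 6 this prints 0x8000003f. */
	printf("reset mask for n=6: 0x%llx\n", (unsigned long long)counter_mask(6));
	return 0;
}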
@@ -762,17 +757,15 @@ static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 pmcr;
+	u64 pmcr = 0;
 
-	/* No PMU available, PMCR_EL0 may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
 	if (!kvm_supports_32bit_el0())
 		pmcr |= ARMV8_PMU_PMCR_LC;
 
+	/*
+	 * The value of PMCR.N field is included when the
+	 * vCPU register is read via kvm_vcpu_read_pmcr().
+	 */
 	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
 
 	return __vcpu_sys_reg(vcpu, r->reg);
@@ -1103,6 +1096,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		    u64 *val)
+{
+	*val = kvm_vcpu_read_pmcr(vcpu);
+	return 0;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
@@ -2167,7 +2167,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_SVCR), undef_access },
 
 	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
-	  .reset = reset_pmcr, .reg = PMCR_EL0 },
+	  .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr },
 	{ PMU_SYS_REG(PMCNTENSET_EL0),
 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
 	{ PMU_SYS_REG(PMCNTENCLR_EL0),
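With .get_user wired to get_pmcr above, user space can read the guest's PMCR_EL0, and hence the guest-visible N, through KVM_GET_ONE_REG. Below is a sketch of that read, assuming an arm64 host with kernel headers installed and a vcpu_fd for a vCPU created with the KVM_ARM_VCPU_PMU_V3 feature; error handling is kept minimal.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>		/* struct kvm_one_reg, KVM_GET_ONE_REG, ARM64_SYS_REG */

/* PMCR_EL0 is S3_3_C9_C12_0: op0=3, op1=3, CRn=9, CRm=12, op2=0 */
#define PMCR_EL0_REG_ID		ARM64_SYS_REG(3, 3, 9, 12, 0)

int dump_guest_pmcr_n(int vcpu_fd)
{
	uint64_t pmcr;
	struct kvm_one_reg reg = {
		.id   = PMCR_EL0_REG_ID,
		.addr = (uint64_t)&pmcr,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	/* N lives in PMCR_EL0[15:11]. */
	printf("guest PMCR_EL0.N = %llu\n", (unsigned long long)((pmcr >> 11) & 0x1f));
	return 0;
}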
include/kvm/arm_pmu.h
@@ -102,6 +102,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 #else
@@ -181,6 +182,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
 	return -ENODEV;
 }
 
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	return 0;
+}
+
 static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 {
 	return 0;