mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
KVM: arm64: Add unified helper for reprogramming counters by mask
Having separate helpers for enabling/disabling counters provides the wrong
abstraction, as the state of each counter needs to be evaluated independently
and, in some cases, use a different global enable bit.

Collapse the enable/disable accessors into a single, common helper that
reconfigures every counter set in @mask, leaving the complexity of determining
if an event is actually enabled in kvm_pmu_counter_is_enabled().

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241217175513.3658056-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
parent 985bb51f17
commit e22c369520
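The crux of the change is that per-counter enablement can no longer be decided
by the caller: a counter counts only if its PMCNTENSET_EL0 bit is set and the
global enable bit that applies to that counter is also set. As a minimal
sketch of what kvm_pmu_counter_is_enabled() has to decide (the predicate
itself is not part of this diff; the kvm_pmu_counter_is_hyp()/MDCR_EL2.HPME
leg is an assumption drawn from the commit message's "different global enable
bit"):

/*
 * Sketch only, not part of this diff. The PMCNTENSET_EL0 check and the
 * PMCR_EL0.E leg follow from the code below; the EL2-reserved-counter
 * leg gated by MDCR_EL2.HPME is an assumption.
 */
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	/* The counter must be set in the guest's PMCNTENSET_EL0... */
	if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
		return false;

	/* ...and gated by whichever global enable bit applies to it. */
	if (kvm_pmu_counter_is_hyp(pmc))
		return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME;

	return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}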
arch/arm64/kvm/pmu-emul.c
@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,48 +328,25 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 	return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-
-	if (!kvm_vcpu_has_pmu(vcpu))
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
-
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
-		return;
-
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
 	}
+
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
+}
+
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
@@ -376,16 +354,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*
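Assembled from the '+' lines of the two hunks above, the unified helper now
reads as follows. The early-return guard is inherited context the hunks only
partially show; "!kvm_vcpu_has_pmu(vcpu) || !val" is assumed from the
surrounding lines.

void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	/* Assumed guard, carried over from the old helpers' context. */
	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!(val & BIT(i)))
			continue;

		/* Each counter's state is evaluated independently. */
		if (kvm_pmu_counter_is_enabled(pmc))
			kvm_pmc_enable_perf_event(pmc);
		else
			kvm_pmc_disable_perf_event(pmc);
	}

	/* Previously only done on the PMCNTENSET_EL0 write path. */
	kvm_vcpu_pmu_restore_guest(vcpu);
}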
@@ -630,10 +610,10 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
 	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
+		kvm_pmu_reprogram_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
 	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
+		kvm_pmu_reprogram_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
 	}
 
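Note that after this change both branches of the PMCR_EL0.E handling pass
identical arguments, so the if/else is redundant. An equivalent form (not in
this patch) would collapse it:

	/* Equivalent, since both branches are now the same call: */
	kvm_pmu_reprogram_counter_mask(vcpu,
				       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));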
arch/arm64/kvm/sys_regs.c
@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
-		if (r->Op2 & 0x1) {
+		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-			kvm_pmu_enable_counter_mask(vcpu, val);
-			kvm_vcpu_pmu_restore_guest(vcpu);
-		} else {
+		else
 			/* accessing PMCNTENCLR_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-			kvm_pmu_disable_counter_mask(vcpu, val);
-		}
+
+		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	}
include/kvm/arm_pmu.h
@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)