Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-28 16:56:26 +00:00)
KVM: arm64: nv: Reprogram PMU events affected by nested transition
Start reprogramming PMU events at nested boundaries now that everything
is in place to handle the EL2 event filter. Only repaint events where
the filter differs between EL1 and EL2 as a slight optimization.

PMU now 'works' for nested VMs, albeit slow.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182559.3364829-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 8a34979030
commit ae323e0358
@@ -2450,6 +2450,8 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 
 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
+
+	kvm_pmu_nested_transition(vcpu);
 }
 
 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2532,6 +2534,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
 
 	kvm_arch_vcpu_load(vcpu, smp_processor_id());
 	preempt_enable();
 
+	kvm_pmu_nested_transition(vcpu);
+
 	return 1;
 }
@@ -1215,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 
 	return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
 }
+
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
+{
+	bool reprogrammed = false;
+	unsigned long mask;
+	int i;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
+	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+	for_each_set_bit(i, &mask, 32) {
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+
+		/*
+		 * We only need to reconfigure events where the filter is
+		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
+		 * event filter bit for nested.
+		 */
+		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
+			continue;
+
+		kvm_pmu_create_perf_event(pmc);
+		reprogrammed = true;
+	}
+
+	if (reprogrammed)
+		kvm_vcpu_pmu_restore_guest(vcpu);
+}
@@ -98,6 +98,7 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
 
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 #else
 struct kvm_pmu {
 };
@@ -198,6 +199,8 @@ static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int id
 	return false;
 }
 
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
 #endif
 
 #endif
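The hunks above are the entire change: kvm_pmu_nested_transition() is called from the nested entry/return paths and only recreates perf events for counters whose EL1 and EL2 filter settings disagree. As a standalone illustration of that check (an editor's sketch, not kernel code: struct demo_pmc, counts_at_el1(), reprogram() and the plain bitmask loop are invented stand-ins for the KVM helpers shown in the diff), the following minimal C program walks the enabled counters and skips any counter whose filter matches at both exception levels:

/*
 * Illustrative sketch only -- not part of the patch. All names here are
 * made up; they mirror the shape of the logic added in
 * kvm_pmu_nested_transition() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 32

struct demo_pmc {
	bool counts_el1;	/* event counts while the vCPU is at (virtual) EL1 */
	bool counts_el2;	/* event counts while the vCPU is at (virtual) EL2 */
};

/* Hypothetical stand-ins for kvm_pmc_counts_at_el1()/_el2(). */
static bool counts_at_el1(const struct demo_pmc *pmc) { return pmc->counts_el1; }
static bool counts_at_el2(const struct demo_pmc *pmc) { return pmc->counts_el2; }

/* Hypothetical stand-in for kvm_pmu_create_perf_event(). */
static void reprogram(int idx)
{
	printf("reprogramming counter %d\n", idx);
}

/*
 * Walk the enabled counters (cnten mask) and only touch those whose
 * EL1 and EL2 filter settings differ; counters filtered identically at
 * both ELs keep their existing event across the nested transition.
 */
static void nested_transition(struct demo_pmc *pmcs, uint32_t cnten)
{
	bool reprogrammed = false;

	for (int i = 0; i < NR_COUNTERS; i++) {
		if (!(cnten & (1u << i)))
			continue;
		if (counts_at_el1(&pmcs[i]) == counts_at_el2(&pmcs[i]))
			continue;	/* filter identical: nothing to do */
		reprogram(i);
		reprogrammed = true;
	}

	if (reprogrammed)
		printf("restoring guest PMU context\n");
}

int main(void)
{
	struct demo_pmc pmcs[NR_COUNTERS] = { 0 };

	pmcs[0] = (struct demo_pmc){ .counts_el1 = true,  .counts_el2 = false };
	pmcs[1] = (struct demo_pmc){ .counts_el1 = true,  .counts_el2 = true  };

	/* Counters 0 and 1 enabled; only counter 0 needs reprogramming. */
	nested_transition(pmcs, 0x3);
	return 0;
}

In the real patch the equivalent of reprogram() is kvm_pmu_create_perf_event(), and the final restore step corresponds to kvm_vcpu_pmu_restore_guest(), which is only invoked when at least one counter was actually reprogrammed.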