mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-17 05:45:20 +00:00
KVM: x86: Remove hwapic_irr_update() from kvm_x86_ops
Remove the redundant .hwapic_irr_update() ops. If a vCPU has APICv enabled, KVM updates its RVI before VM-enter to L1 in vmx_sync_pir_to_irr(). This guarantees RVI is up-to-date and aligned with the vIRR in the virtual APIC, so there is no need to update RVI every time the vIRR changes. Note that KVM never updates vmcs02 RVI in .hwapic_irr_update() or vmx_sync_pir_to_irr(), so removing .hwapic_irr_update() has no impact on the nested case. Signed-off-by: Chao Gao <chao.gao@intel.com> Link: https://lore.kernel.org/r/20241111085947.432645-1-chao.gao@intel.com Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
parent
3d91521e57
commit
ca0245d131
@ -83,7 +83,6 @@ KVM_X86_OP(enable_nmi_window)
|
||||
KVM_X86_OP(enable_irq_window)
|
||||
KVM_X86_OP_OPTIONAL(update_cr8_intercept)
|
||||
KVM_X86_OP(refresh_apicv_exec_ctrl)
|
||||
KVM_X86_OP_OPTIONAL(hwapic_irr_update)
|
||||
KVM_X86_OP_OPTIONAL(hwapic_isr_update)
|
||||
KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
|
||||
KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
|
||||
|
@ -1734,7 +1734,6 @@ struct kvm_x86_ops {
|
||||
const unsigned long required_apicv_inhibits;
|
||||
bool allow_apicv_in_x2apic_without_x2apic_virtualization;
|
||||
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
|
||||
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
|
||||
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
|
||||
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
||||
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
|
||||
|
@ -734,10 +734,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
|
||||
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
|
||||
{
|
||||
if (unlikely(apic->apicv_active)) {
|
||||
/* need to update RVI */
|
||||
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
||||
kvm_x86_call(hwapic_irr_update)(apic->vcpu,
|
||||
apic_find_highest_irr(apic));
|
||||
} else {
|
||||
apic->irr_pending = false;
|
||||
kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
|
||||
@ -2816,7 +2813,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
|
||||
apic_update_ppr(apic);
|
||||
if (apic->apicv_active) {
|
||||
kvm_x86_call(apicv_post_state_restore)(vcpu);
|
||||
kvm_x86_call(hwapic_irr_update)(vcpu, -1);
|
||||
kvm_x86_call(hwapic_isr_update)(vcpu, -1);
|
||||
}
|
||||
|
||||
@ -3132,7 +3128,6 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
|
||||
kvm_apic_update_apicv(vcpu);
|
||||
if (apic->apicv_active) {
|
||||
kvm_x86_call(apicv_post_state_restore)(vcpu);
|
||||
kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
|
||||
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
|
||||
}
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
|
@ -100,7 +100,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
|
||||
.load_eoi_exitmap = vmx_load_eoi_exitmap,
|
||||
.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
|
||||
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
|
||||
.hwapic_irr_update = vmx_hwapic_irr_update,
|
||||
.hwapic_isr_update = vmx_hwapic_isr_update,
|
||||
.sync_pir_to_irr = vmx_sync_pir_to_irr,
|
||||
.deliver_interrupt = vmx_deliver_interrupt,
|
||||
|
@ -6918,20 +6918,6 @@ static void vmx_set_rvi(int vector)
|
||||
}
|
||||
}
|
||||
|
||||
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
|
||||
{
|
||||
/*
|
||||
* When running L2, updating RVI is only relevant when
|
||||
* vmcs12 virtual-interrupt-delivery enabled.
|
||||
* However, it can be enabled only when L1 also
|
||||
* intercepts external-interrupts and in that case
|
||||
* we should not update vmcs02 RVI but instead intercept
|
||||
* interrupt. Therefore, do nothing when running L2.
|
||||
*/
|
||||
if (!is_guest_mode(vcpu))
|
||||
vmx_set_rvi(max_irr);
|
||||
}
|
||||
|
||||
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
@ -47,7 +47,6 @@ bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
|
||||
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
|
||||
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
|
||||
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
|
||||
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
|
||||
void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
|
||||
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
|
||||
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
|
||||
|
Loading…
x
Reference in New Issue
Block a user