KVM: Make kvm_make_vcpus_request_mask() use pre-allocated cpu_kick_mask
kvm_make_vcpus_request_mask() already disables preemption, so just like
kvm_make_all_cpus_request_except() it can be switched to using the
pre-allocated per-CPU cpumasks. This allows improvements for both users of
the function: in the Hyper-V emulation code, 'tlb_flush' can now be dropped
from 'struct kvm_vcpu_hv', and kvm_make_scan_ioapic_request_mask() gets rid
of its dynamic allocation.

The cpumask_available() checks in kvm_make_vcpu_request() and
kvm_kick_many_cpus() can now be dropped as they check for an impossible
condition: kvm_init() makes sure the per-CPU masks are allocated.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210903075141.403071-9-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent baff59ccdc
commit 620b2438ab
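Below is a minimal sketch of the pattern the commit adopts (kernel context assumed, not a standalone program): get_cpu() disables preemption, which pins the task to the current CPU, so that CPU's pre-allocated scratch mask cannot be handed to another task before put_cpu(). 'cpu_kick_mask' is the per-CPU cpumask kvm_init() allocates; the function name is invented for illustration.

/* Illustrative only: use a pre-allocated per-CPU cpumask as scratch
 * space while preemption is disabled, instead of allocating one with
 * zalloc_cpumask_var(..., GFP_ATOMIC), which can fail at runtime.
 */
static bool kick_targets_example(void)
{
	struct cpumask *cpus;
	bool called;

	get_cpu();		/* disable preemption: no migration, and no
				 * other task can reuse this CPU's mask
				 * (the real code also keeps the returned
				 * CPU id to special-case the local CPU) */

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);	/* mask may hold bits from a previous use */

	/* ... cpumask_set_cpu(target, cpus) for each CPU to kick ... */

	called = !cpumask_empty(cpus);
	put_cpu();		/* re-enable preemption */
	return called;
}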
arch/x86/include/asm/kvm_host.h
@@ -581,7 +581,6 @@ struct kvm_vcpu_hv {
 	struct kvm_hyperv_exit exit;
 	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
 	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
-	cpumask_t tlb_flush;
 	bool enforce_cpuid;
 	struct {
 		u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
arch/x86/kvm/hyperv.c
@@ -1754,7 +1754,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 	int i;
 	gpa_t gpa;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct hv_tlb_flush_ex flush_ex;
 	struct hv_tlb_flush flush;
 	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
@@ -1836,8 +1835,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 		}
 	}
 
-	cpumask_clear(&hv_vcpu->tlb_flush);
-
 	/*
 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
 	 * analyze it here, flush TLB regardless of the specified address space.
@@ -1849,7 +1846,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 					    vp_bitmap, vcpu_bitmap);
 
 		kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
-					    vcpu_mask, &hv_vcpu->tlb_flush);
+					    vcpu_mask);
 	}
 
 ret_success:
arch/x86/kvm/x86.c
@@ -9242,14 +9242,7 @@ static void process_smi(struct kvm_vcpu *vcpu)
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap)
 {
-	cpumask_var_t cpus;
-
-	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
-
-	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
-				    vcpu_bitmap, cpus);
-
-	free_cpumask_var(cpus);
+	kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
 }
 
 void kvm_make_scan_ioapic_request(struct kvm *kvm)
include/linux/kvm_host.h
@@ -160,7 +160,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
-				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
+				 unsigned long *vcpu_bitmap);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 				      struct kvm_vcpu *except);
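For illustration, a hypothetical caller of the new prototype (the surrounding function and the chosen vCPU indices are invented; DECLARE_BITMAP, bitmap_zero and set_bit are standard kernel helpers, and KVM_REQ_TLB_FLUSH_GUEST assumes x86 context):

/* Request a guest TLB flush on vCPUs 0 and 2. No scratch cpumask is
 * passed any more; the per-CPU cpu_kick_mask is used internally.
 */
static bool flush_two_vcpus(struct kvm *kvm)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
	set_bit(0, vcpu_bitmap);
	set_bit(2, vcpu_bitmap);

	/* Returns true if at least one pCPU actually had to be kicked. */
	return kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
					   vcpu_bitmap);
}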
virt/kvm/kvm_main.c
@@ -237,15 +237,8 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
+static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
 {
-	const struct cpumask *cpus;
-
-	if (likely(cpumask_available(tmp)))
-		cpus = tmp;
-	else
-		cpus = cpu_online_mask;
-
 	if (cpumask_empty(cpus))
 		return false;
 
@@ -254,7 +247,7 @@ static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 }
 
 static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
-				  unsigned int req, cpumask_var_t tmp,
+				  unsigned int req, struct cpumask *tmp,
 				  int current_cpu)
 {
 	int cpu;
@@ -264,14 +257,6 @@ static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 		return;
 
-	/*
-	 * tmp can be "unavailable" if cpumasks are allocated off stack as
-	 * allocation of the mask is deliberately not fatal and is handled by
-	 * falling back to kicking all online CPUs.
-	 */
-	if (!cpumask_available(tmp))
-		return;
-
 	/*
 	 * Note, the vCPU could get migrated to a different pCPU at any point
 	 * after kvm_request_needs_ipi(), which could result in sending an IPI
@@ -290,22 +275,26 @@ static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
 }
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
-				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
+				 unsigned long *vcpu_bitmap)
 {
 	struct kvm_vcpu *vcpu;
+	struct cpumask *cpus;
 	int i, me;
 	bool called;
 
 	me = get_cpu();
 
+	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
+	cpumask_clear(cpus);
+
 	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
 		vcpu = kvm_get_vcpu(kvm, i);
 		if (!vcpu)
 			continue;
-		kvm_make_vcpu_request(kvm, vcpu, req, tmp, me);
+		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
 	}
 
-	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
+	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
 	put_cpu();
 
 	return called;