mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-10 07:10:27 +00:00)
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Avi Kivity:
 "Two asynchronous page fault fixes (one guest, one host), a powerpc
  page refcount fix, and an ia64 build fix."

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: ia64: fix build due to typo
  KVM: PPC: Book3S HV: Fix refcounting of hugepages
  KVM: Do not take reference to mm during async #PF
  KVM: ensure async PF event wakes up vcpu from halt
This commit is contained in:
commit 63f4711aec
@@ -1174,7 +1174,7 @@ out:
 
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-	return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 		    !(memslot->userspace_addr & (s - 1))) {
 			start &= ~(s - 1);
 			pgsize = s;
+			get_page(hpage);
+			put_page(page);
 			page = hpage;
 		}
 	}
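Note on the hunk above: when the working pointer is switched from page to the pre-existing huge page hpage, the fix takes the reference on hpage before dropping the one held on page, so the function never points at a page it holds no reference on. A minimal userspace sketch of that get-before-put ordering, using a hypothetical refcounted stand-in for struct page (illustrative only, not kernel code):

/* get_before_put.c -- illustrative model of the ordering used in the
 * kvmppc_get_guest_page() hunk above; not the kernel implementation. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_page {                      /* hypothetical stand-in for struct page */
	atomic_int refcount;
};

static void fake_get_page(struct fake_page *p) { atomic_fetch_add(&p->refcount, 1); }
static void fake_put_page(struct fake_page *p) { atomic_fetch_sub(&p->refcount, 1); }

int main(void)
{
	struct fake_page small_page = { 1 };    /* reference we currently hold */
	struct fake_page huge_page  = { 1 };    /* pre-existing compound page */
	struct fake_page *page = &small_page;

	/* Switch the working pointer to the huge page: take the new
	 * reference first, then drop the old one, as the fix does. */
	fake_get_page(&huge_page);
	fake_put_page(page);
	page = &huge_page;

	printf("small=%d huge=%d (page now holds one reference on huge)\n",
	       atomic_load(&small_page.refcount),
	       atomic_load(&huge_page.refcount));
	return 0;
}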
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	err = 0;
 
  out:
-	if (got) {
-		if (PageHuge(page))
-			page = compound_head(page);
+	if (got)
 		put_page(page);
-	}
 	return err;
 
  up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page because page might
+		 * have been set to the head page of a compound, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup()
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
 
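Note on the kvmppc_pin_guest_page()/kvmppc_unpin_guest_page() hunks above: after the change, pinning always takes exactly one page reference (via get_page() or via the reference returned by get_user_pages_fast()), and unpinning drops exactly one, with no compound-head special casing left on the unpin side. A small hedged sketch of that balanced-accounting invariant (hypothetical names, not the kernel API):

/* pin_balance.c -- illustrative only: one pin is matched by exactly one
 * unpin, independent of page size. Not kernel code. */
#include <assert.h>
#include <stdio.h>

static int refcount = 1;                 /* one reference held by the page's owner */

static void pin_guest_page(void)   { refcount++; }   /* models the pin side */
static void unpin_guest_page(void) { refcount--; }   /* models the unpin side */

int main(void)
{
	int before = refcount;

	pin_guest_page();
	/* ... the pinned guest page is used here ... */
	unpin_guest_page();

	assert(refcount == before);      /* balanced: pin and unpin pair up */
	printf("refcount back to %d\n", refcount);
	return 0;
}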
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
 			continue;
 		pfn = physp[j] >> PAGE_SHIFT;
 		page = pfn_to_page(pfn);
-		if (PageHuge(page))
-			page = compound_head(page);
 		SetPageDirty(page);
 		put_page(page);
 	}
@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@ again:
 	 * async PF was not yet handled.
	 * Add dummy entry for the token.
	 */
-	n = kmalloc(sizeof(*n), GFP_ATOMIC);
+	n = kzalloc(sizeof(*n), GFP_ATOMIC);
 	if (!n) {
 		/*
 		 * Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@ again:
 	}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else
@@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
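Note on the hunk above: clearing vcpu->arch.apf.halted alone does not resume a guest vcpu that halted while waiting for the faulted-in page; the vcpu also has to be moved back to KVM_MP_STATE_RUNNABLE, which is what the added line does and what "ensure async PF event wakes up vcpu from halt" refers to. A simplified, hedged model of that interaction (illustrative only, not KVM's actual vcpu run loop):

/* runnable_model.c -- illustrative only: shows why the "page present"
 * handler must also mark the vcpu RUNNABLE in this simplified model. */
#include <stdbool.h>
#include <stdio.h>

enum mp_state { MP_STATE_RUNNABLE, MP_STATE_HALTED };

struct vcpu_model {
	bool apf_halted;                 /* stand-in for vcpu->arch.apf.halted */
	enum mp_state mp_state;          /* stand-in for vcpu->arch.mp_state */
};

/* the vcpu only makes progress when both conditions allow it */
static bool can_run(const struct vcpu_model *v)
{
	return !v->apf_halted && v->mp_state == MP_STATE_RUNNABLE;
}

/* "page ready" notification arrives while the vcpu is halted */
static void async_page_present(struct vcpu_model *v, bool set_runnable)
{
	v->apf_halted = false;
	if (set_runnable)
		v->mp_state = MP_STATE_RUNNABLE;   /* the line added by the hunk */
}

int main(void)
{
	struct vcpu_model v = { .apf_halted = true, .mp_state = MP_STATE_HALTED };

	async_page_present(&v, false);
	printf("without mp_state update: can_run=%d\n", can_run(&v));  /* stays stuck */

	v = (struct vcpu_model){ .apf_halted = true, .mp_state = MP_STATE_HALTED };
	async_page_present(&v, true);
	printf("with mp_state update:    can_run=%d\n", can_run(&v));  /* resumes */
	return 0;
}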