KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest

Convert MIPS to kvm_faultin_pfn()+kvm_release_faultin_page(), which
are new APIs to consolidate arch code and provide consistent behavior
across all KVM architectures.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-73-seanjc@google.com>
This commit is contained in:
Sean Christopherson 2024-10-10 11:24:14 -07:00 committed by Paolo Bonzini
parent 13d66fddaa
commit 7e8f1aa59d

View File

@@ -557,6 +557,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	bool writeable;
 	unsigned long prot_bits;
 	unsigned long mmu_seq;
+	struct page *page;

 	/* Try the fast path to handle old / clean pages */
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -578,7 +579,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	mmu_seq = kvm->mmu_invalidate_seq;
 	/*
 	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
-	 * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have a
	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
	 *
@@ -590,7 +591,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	smp_rmb();

 	/* Slow path - ask KVM core whether we can access this GPA */
-	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
 	if (is_error_noslot_pfn(pfn)) {
 		err = -EFAULT;
 		goto out;
@@ -602,10 +603,10 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
-		 * gfn_to_pfn_prot().
+		 * kvm_faultin_pfn().
 		 */
 		spin_unlock(&kvm->mmu_lock);
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_unused(page);
 		goto retry;
 	}
@@ -632,10 +633,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	if (out_buddy)
 		*out_buddy = *ptep_buddy(ptep);

-	if (writeable)
-		kvm_set_pfn_dirty(pfn);
-	kvm_release_pfn_clean(pfn);
-
+	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);