mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 21:23:23 +00:00
KVM: Drop KVM_ERR_PTR_BAD_PAGE and instead return NULL to indicate an error
Remove KVM_ERR_PTR_BAD_PAGE and instead return NULL, as "bad page" is just a leftover bit of weirdness from days of old when KVM stuffed a "bad" page into the guest instead of actually handling missing pages. See commit cea7bb21280e ("KVM: MMU: Make gfn_to_page() always safe"). Reviewed-by: Alex Bennée <alex.bennee@linaro.org> Tested-by: Alex Bennée <alex.bennee@linaro.org> Signed-off-by: Sean Christopherson <seanjc@google.com> Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Message-ID: <20241010182427.1434605-2-seanjc@google.com>
This commit is contained in:
parent
e9001a382f
commit
037bc38b29
@@ -645,7 +645,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
|
||||
int i;
|
||||
|
||||
hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
|
||||
if (is_error_page(hpage))
|
||||
if (!hpage)
|
||||
return;
|
||||
|
||||
hpage_offset = pte->raddr & ~PAGE_MASK;
|
||||
|
@@ -654,7 +654,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
|
||||
}
|
||||
|
||||
page = gfn_to_page(kvm, gfn);
|
||||
if (is_error_page(page)) {
|
||||
if (!page) {
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
|
||||
return -EINVAL;
|
||||
|
@@ -661,7 +661,7 @@ static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
|
||||
struct page *page;
|
||||
|
||||
page = gfn_to_page(kvm, gpa_to_gfn(gpa));
|
||||
if (is_error_page(page))
|
||||
if (!page)
|
||||
return -EINVAL;
|
||||
*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
|
||||
return 0;
|
||||
|
@@ -2664,7 +2664,7 @@ int kvm_alloc_apic_access_page(struct kvm *kvm)
|
||||
}
|
||||
|
||||
page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
|
||||
if (is_error_page(page)) {
|
||||
if (!page) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
@@ -153,13 +153,6 @@ static inline bool kvm_is_error_gpa(gpa_t gpa)
|
||||
return gpa == INVALID_GPA;
|
||||
}
|
||||
|
||||
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
|
||||
|
||||
static inline bool is_error_page(struct page *page)
|
||||
{
|
||||
return IS_ERR(page);
|
||||
}
|
||||
|
||||
#define KVM_REQUEST_MASK GENMASK(7,0)
|
||||
#define KVM_REQUEST_NO_WAKEUP BIT(8)
|
||||
#define KVM_REQUEST_WAIT BIT(9)
|
||||
|
@@ -3066,19 +3066,14 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
|
||||
*/
|
||||
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
|
||||
{
|
||||
struct page *page;
|
||||
kvm_pfn_t pfn;
|
||||
|
||||
pfn = gfn_to_pfn(kvm, gfn);
|
||||
|
||||
if (is_error_noslot_pfn(pfn))
|
||||
return KVM_ERR_PTR_BAD_PAGE;
|
||||
return NULL;
|
||||
|
||||
page = kvm_pfn_to_refcounted_page(pfn);
|
||||
if (!page)
|
||||
return KVM_ERR_PTR_BAD_PAGE;
|
||||
|
||||
return page;
|
||||
return kvm_pfn_to_refcounted_page(pfn);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gfn_to_page);
|
||||
|
||||
@@ -3172,7 +3167,8 @@ static void kvm_set_page_accessed(struct page *page)
|
||||
|
||||
void kvm_release_page_clean(struct page *page)
|
||||
{
|
||||
WARN_ON(is_error_page(page));
|
||||
if (WARN_ON(!page))
|
||||
return;
|
||||
|
||||
kvm_set_page_accessed(page);
|
||||
put_page(page);
|
||||
@@ -3196,7 +3192,8 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
|
||||
|
||||
void kvm_release_page_dirty(struct page *page)
|
||||
{
|
||||
WARN_ON(is_error_page(page));
|
||||
if (WARN_ON(!page))
|
||||
return;
|
||||
|
||||
kvm_set_page_dirty(page);
|
||||
kvm_release_page_clean(page);
|
||||
|
Loading…
x
Reference in New Issue
Block a user