Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm: convert insert_pfn() to vm_fault_t
All callers convert its errno into a vm_fault_t, so convert it to return a
vm_fault_t directly.

Link: http://lkml.kernel.org/r/20180828145728.11873-11-willy@infradead.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79f3aa5ba9
commit 9b5a8e00d4

 mm/memory.c | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)
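For context, before this patch each caller translated insert_pfn()'s errno into a
vm_fault_t by hand, as in the code removed from vmf_insert_pfn_prot() in the diff
below. A minimal sketch of that mapping follows; the helper name errno_to_vm_fault()
is illustrative only and is not part of the patch or the kernel API.

#include <linux/mm.h>	/* vm_fault_t, VM_FAULT_* */

/*
 * Illustrative sketch only: the errno -> vm_fault_t mapping that callers
 * such as vmf_insert_pfn_prot() used to perform, and which insert_pfn()
 * now short-circuits by returning a vm_fault_t itself.
 */
static vm_fault_t errno_to_vm_fault(int err)
{
        if (err == -ENOMEM)
                return VM_FAULT_OOM;            /* get_locked_pte() failed */
        if (err < 0 && err != -EBUSY)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;                 /* success, or -EBUSY: pte already populated */
}

With insert_pfn() returning VM_FAULT_OOM and VM_FAULT_NOPAGE directly, callers such
as vmf_insert_pfn_prot() and __vm_insert_mixed() can simply return its result, which
is what the later hunks do.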
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,19 +1520,16 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
        struct mm_struct *mm = vma->vm_mm;
-       int retval;
        pte_t *pte, entry;
        spinlock_t *ptl;
 
-       retval = -ENOMEM;
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
-               goto out;
-       retval = -EBUSY;
+               return VM_FAULT_OOM;
        if (!pte_none(*pte)) {
                if (mkwrite) {
                        /*
@@ -1565,11 +1562,9 @@ out_mkwrite:
        set_pte_at(mm, addr, pte, entry);
        update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
-       retval = 0;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-out:
-       return retval;
+       return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1593,8 +1588,6 @@ out:
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn, pgprot_t pgprot)
 {
-       int err;
-
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range). However we would like
@@ -1615,15 +1608,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
        track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
 
-       err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+       return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
                        false);
-
-       if (err == -ENOMEM)
-               return VM_FAULT_OOM;
-       if (err < 0 && err != -EBUSY)
-               return VM_FAULT_SIGBUS;
-
-       return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
@@ -1703,7 +1689,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
                page = pfn_to_page(pfn_t_to_pfn(pfn));
                err = insert_page(vma, addr, page, pgprot);
        } else {
-               err = insert_pfn(vma, addr, pfn, pgprot, mkwrite);
+               return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
        }
 
        if (err == -ENOMEM)