mm: remove follow_pfn
Remove follow_pfn now that the last user is gone.

Link: https://lkml.kernel.org/r/20240324234542.2038726-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fei Li <fei1.li@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit cb10c28ac8 (parent 1b265da7ea)
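For any remaining out-of-tree caller, a minimal sketch of the migration path: open-code the lookup with follow_pte() instead of calling the removed follow_pfn(), mirroring the body of the mm/memory.c implementation deleted in the diff below. The helper name lookup_io_pfn() is hypothetical and not part of this commit; unlike follow_pfn(), this pattern also lets the caller inspect the PTE (e.g. pte_write()) while the page-table lock is held.

/* Hypothetical caller, sketched only to illustrate the conversion. */
static int lookup_io_pfn(struct vm_area_struct *vma, unsigned long address,
			 unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	/* Only IO mappings and raw PFN mappings are allowed. */
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	/* Look up the PTE under its lock, as the removed follow_pfn() did. */
	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;

	/* Permissions could be checked here via the mapped ptep if needed. */
	*pfn = pte_pfn(ptep_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}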
include/linux/mm.h
@@ -2424,8 +2424,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
36 mm/memory.c
@@ -5884,8 +5884,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
  * should be taken for read.
  *
- * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
- * it is not a good general-purpose API.
+ * KVM uses this function. While it is arguably less bad than the historic
+ * ``follow_pfn``, it is not a good general-purpose API.
  *
  * Return: zero on success, -ve otherwise.
  */
@@ -5927,38 +5927,6 @@ int follow_pte(struct mm_struct *mm, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(follow_pte);
 
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * This function does not allow the caller to read the permissions
- * of the PTE. Do not use it.
- *
- * Return: zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn)
-{
-	int ret = -EINVAL;
-	spinlock_t *ptl;
-	pte_t *ptep;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return ret;
-
-	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
-	if (ret)
-		return ret;
-	*pfn = pte_pfn(ptep_get(ptep));
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 int follow_phys(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags,
21 mm/nommu.c
@@ -110,27 +110,6 @@ unsigned int kobjsize(const void *objp)
 	return page_size(page);
 }
 
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * Returns zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
-	unsigned long *pfn)
-{
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		return -EINVAL;
-
-	*pfn = address >> PAGE_SHIFT;
-	return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
 void vfree(const void *addr)
 {
 	kfree(addr);