Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
mm/gup: handle huge pmd for follow_pmd_mask()
Replace pmd_trans_huge() with pmd_leaf() to also cover pmd_huge() as long as it is enabled.  FOLL_TOUCH and FOLL_SPLIT_PMD only apply to THP, not to other huge mappings yet.

Since follow_trans_huge_pmd() can now process hugetlb pages, rename it to follow_huge_pmd() to match what it does, and move it into gup.c so it does not depend on CONFIG_THP.

While at it, move the ctx->page_mask setup into follow_huge_pmd() and only set it when the page is valid.  It was not a bug to set it before even if GUP failed (page==NULL), because follow_page_mask() callers always ignore page_mask in that case, but doing so makes the code cleaner.

[peterx@redhat.com: allow follow_pmd_mask() to take hugetlb tail pages]
Link: https://lkml.kernel.org/r/20240403013249.1418299-3-peterx@redhat.com
Link: https://lkml.kernel.org/r/20240327152332.950956-12-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Aneesh Kumar K.V (IBM) <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 1b16761802
commit 4418c522f6

mm/gup.c (104 changed lines)
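In rough terms, the new PMD dispatch can be summarized as below. This is an illustrative sketch only (the function name is hypothetical and locking, refcounting and error paths are omitted); the real logic is in the follow_pmd_mask() hunk further down:

/*
 * Illustrative sketch, not the literal patched function: with pmd_leaf()
 * as the test, both THP and hugetlb leaf PMDs reach follow_huge_pmd(),
 * while non-leaf PMDs still go through the normal PTE walker.
 * FOLL_SPLIT_PMD keeps its THP-only meaning via the explicit
 * pmd_trans_huge() check, since split_huge_pmd() cannot split hugetlb.
 */
static struct page *pmd_dispatch_sketch(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmd,
                                        unsigned int flags,
                                        struct follow_page_context *ctx)
{
        pmd_t pmdval = *pmd;

        if (!pmd_leaf(pmdval))          /* a normal page table is mapped */
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
                split_huge_pmd(vma, pmd, address);      /* THP only */
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }

        return follow_huge_pmd(vma, address, pmd, flags, ctx); /* THP or hugetlb */
}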
mm/gup.c:

@@ -580,6 +580,90 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
 	return page;
 }
 
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+					struct vm_area_struct *vma,
+					unsigned int flags)
+{
+	/* If the pmd is writable, we can write to the page. */
+	if (pmd_write(pmd))
+		return true;
+
+	/* Maybe FOLL_FORCE is set to override it? */
+	if (!(flags & FOLL_FORCE))
+		return false;
+
+	/* But FOLL_FORCE has no effect on shared mappings */
+	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+		return false;
+
+	/* ... or read-only private ones */
+	if (!(vma->vm_flags & VM_MAYWRITE))
+		return false;
+
+	/* ... or already writable ones that just need to take a write fault */
+	if (vma->vm_flags & VM_WRITE)
+		return false;
+
+	/*
+	 * See can_change_pte_writable(): we broke COW and could map the page
+	 * writable if we have an exclusive anonymous page ...
+	 */
+	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+		return false;
+
+	/* ... and a write-fault isn't required for other reasons. */
+	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+		return false;
+	return !userfaultfd_huge_pmd_wp(vma, pmd);
+}
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+				    unsigned long addr, pmd_t *pmd,
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmdval = *pmd;
+	struct page *page;
+	int ret;
+
+	assert_spin_locked(pmd_lockptr(mm, pmd));
+
+	page = pmd_page(pmdval);
+	if ((flags & FOLL_WRITE) &&
+	    !can_follow_write_pmd(pmdval, page, vma, flags))
+		return NULL;
+
+	/* Avoid dumping huge zero page */
+	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
+		return ERR_PTR(-EFAULT);
+
+	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
+		return NULL;
+
+	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
+		return ERR_PTR(-EMLINK);
+
+	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+			!PageAnonExclusive(page), page);
+
+	ret = try_grab_page(page, flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
+		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
+#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
+
+	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+	ctx->page_mask = HPAGE_PMD_NR - 1;
+
+	return page;
+}
+
 #else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
 				    unsigned long addr, pud_t *pudp,
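Note how follow_huge_pmd() now sets ctx->page_mask itself, and only on success. As a hedged illustration of why the mask matters, the hypothetical helper below (not part of the patch) shows how a follow_page_mask() caller can use it to work out how many base pages remain in the same leaf and step over all of them without re-walking the page tables:

/*
 * Hypothetical helper, not part of this patch: given the address being
 * looked up and the page_mask returned via follow_page_context
 * (HPAGE_PMD_NR - 1 for a PMD leaf), compute how many base pages are
 * covered from 'addr' to the end of that leaf.  A GUP loop can then
 * advance that far in one iteration.
 */
static unsigned long pages_left_in_leaf(unsigned long addr,
                                        unsigned long page_mask)
{
        /* index of 'addr' within the huge mapping ... */
        unsigned long idx = (addr >> PAGE_SHIFT) & page_mask;

        /* ... and the number of base pages from here to the leaf's end */
        return page_mask + 1 - idx;
}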
@@ -587,6 +671,14 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
 {
 	return NULL;
 }
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+				    unsigned long addr, pmd_t *pmd,
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
+{
+	return NULL;
+}
 #endif	/* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 
 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
@@ -784,31 +876,31 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags, address);
 	}
-	if (likely(!pmd_trans_huge(pmdval)))
+	if (likely(!pmd_leaf(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
 		return no_page_table(vma, flags, address);
 
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_present(*pmd))) {
+	pmdval = *pmd;
+	if (unlikely(!pmd_present(pmdval))) {
 		spin_unlock(ptl);
 		return no_page_table(vma, flags, address);
 	}
-	if (unlikely(!pmd_trans_huge(*pmd))) {
+	if (unlikely(!pmd_leaf(pmdval))) {
 		spin_unlock(ptl);
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	if (flags & FOLL_SPLIT_PMD) {
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
 		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	page = follow_trans_huge_pmd(vma, address, pmd, flags);
+	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
 	spin_unlock(ptl);
-	ctx->page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }
 
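One detail worth noting in this hunk: pmdval is re-read once the PMD lock is held, so all later checks act on a value that can no longer change underneath us. The hypothetical helper below (not part of the patch, which open-codes the same pattern) sketches that idea:

/*
 * Hypothetical helper illustrating the "re-read under the lock" pattern
 * used above: the earlier lockless snapshot of *pmd is only good for
 * fast-path filtering; once pmd_lock() is taken the entry must be read
 * again before acting on it, because a concurrent zap or split may have
 * changed it in the meantime.
 */
static pmd_t pmd_read_locked(struct mm_struct *mm, pmd_t *pmd,
                             spinlock_t **ptl)
{
        *ptl = pmd_lock(mm, pmd);       /* serialize against zap/split */
        return *pmd;                    /* stable while *ptl is held */
}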
mm/huge_memory.c:

@@ -1221,8 +1221,8 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		      pmd_t *pmd, bool write)
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	       pmd_t *pmd, bool write)
 {
 	pmd_t _pmd;
 
@@ -1577,88 +1577,6 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 	return pmd_dirty(pmd);
 }
 
-/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
-static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
-					struct vm_area_struct *vma,
-					unsigned int flags)
-{
-	/* If the pmd is writable, we can write to the page. */
-	if (pmd_write(pmd))
-		return true;
-
-	/* Maybe FOLL_FORCE is set to override it? */
-	if (!(flags & FOLL_FORCE))
-		return false;
-
-	/* But FOLL_FORCE has no effect on shared mappings */
-	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
-		return false;
-
-	/* ... or read-only private ones */
-	if (!(vma->vm_flags & VM_MAYWRITE))
-		return false;
-
-	/* ... or already writable ones that just need to take a write fault */
-	if (vma->vm_flags & VM_WRITE)
-		return false;
-
-	/*
-	 * See can_change_pte_writable(): we broke COW and could map the page
-	 * writable if we have an exclusive anonymous page ...
-	 */
-	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
-		return false;
-
-	/* ... and a write-fault isn't required for other reasons. */
-	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
-		return false;
-	return !userfaultfd_huge_pmd_wp(vma, pmd);
-}
-
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr,
-				   pmd_t *pmd,
-				   unsigned int flags)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	struct page *page;
-	int ret;
-
-	assert_spin_locked(pmd_lockptr(mm, pmd));
-
-	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
-
-	if ((flags & FOLL_WRITE) &&
-	    !can_follow_write_pmd(*pmd, page, vma, flags))
-		return NULL;
-
-	/* Avoid dumping huge zero page */
-	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
-		return ERR_PTR(-EFAULT);
-
-	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
-		return NULL;
-
-	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
-		return ERR_PTR(-EMLINK);
-
-	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-			!PageAnonExclusive(page), page);
-
-	ret = try_grab_page(page, flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
-
-	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
-
-	return page;
-}
-
 /* NUMA hinting page fault entry point for trans huge pmds */
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
mm/internal.h:

@@ -1114,9 +1114,8 @@ int __must_check try_grab_page(struct page *page, unsigned int flags);
  */
 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 	       pud_t *pud, bool write);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr, pmd_t *pmd,
-				   unsigned int flags);
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	       pmd_t *pmd, bool write);
 
 /*
  * mm/mmap.c