mm: convert do_huge_pmd_anonymous_page to huge_zero_folio
Use folios more widely.

Link: https://lkml.kernel.org/r/20240326202833.523759-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5691753d73
commit e28833bc4a
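The series-wide pattern this patch applies: callers move from the page-returning helper to the folio-returning one, and anything that still needs a struct page takes it from the folio via &folio->page. Below is a minimal, compilable userspace sketch of that relationship; the types and names (struct folio wrapping struct page, get_huge_zero_folio(), get_huge_zero_page()) are illustrative stand-ins, not the kernel's actual definitions.

/* Sketch only: models the page -> folio conversion pattern, not kernel code. */
#include <stdio.h>

struct page { unsigned long flags; };

/*
 * A folio embeds its head page as the first member, so a pointer to
 * the folio is also a pointer to that page.
 */
struct folio { struct page page; };

static struct folio huge_zero;	/* stand-in for the huge zero folio */

/* Folio-returning helper (cf. mm_get_huge_zero_folio()). */
static struct folio *get_huge_zero_folio(void)
{
	return &huge_zero;
}

/*
 * Legacy page-returning wrapper (cf. mm_get_huge_zero_page()):
 * just the folio helper plus &folio->page.
 */
static struct page *get_huge_zero_page(void)
{
	return &get_huge_zero_folio()->page;
}

int main(void)
{
	struct folio *zero_folio = get_huge_zero_folio();	/* converted caller */
	struct page *zero_page = get_huge_zero_page();		/* unconverted caller */

	/* Both refer to the same head page. */
	printf("same page: %s\n",
	       zero_page == &zero_folio->page ? "yes" : "no");
	return 0;
}

Once every caller uses the folio helper, the page-based wrapper can be dropped; this commit converts the do_huge_pmd_anonymous_page() path, as the diff below shows.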
mm/huge_memory.c

@@ -971,14 +971,14 @@ gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
 }
 
 /* Caller must hold page table lock. */
-static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-		struct page *zero_page)
+		struct folio *zero_folio)
 {
 	pmd_t entry;
 	if (!pmd_none(*pmd))
 		return;
-	entry = mk_pmd(zero_page, vma->vm_page_prot);
+	entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
 	entry = pmd_mkhuge(entry);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
@@ -1002,13 +1002,14 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
-		struct page *zero_page;
+		struct folio *zero_folio;
 		vm_fault_t ret;
+
 		pgtable = pte_alloc_one(vma->vm_mm);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
-		zero_page = mm_get_huge_zero_page(vma->vm_mm);
-		if (unlikely(!zero_page)) {
+		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
+		if (unlikely(!zero_folio)) {
 			pte_free(vma->vm_mm, pgtable);
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
@@ -1026,8 +1027,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 		} else {
-			set_huge_zero_page(pgtable, vma->vm_mm, vma,
-					   haddr, vmf->pmd, zero_page);
+			set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+					    haddr, vmf->pmd, zero_folio);
 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 			spin_unlock(vmf->ptl);
 		}
@@ -1336,9 +1337,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 */
 	if (is_huge_zero_pmd(pmd)) {
 		/*
-		 * get_huge_zero_page() will never allocate a new page here,
-		 * since we already have a zero page to copy. It just takes a
-		 * reference.
+		 * mm_get_huge_zero_folio() will never allocate a new
+		 * folio here, since we already have a zero page to
+		 * copy. It just takes a reference.
 		 */
 		mm_get_huge_zero_folio(dst_mm);
 		goto out_zero_page;