mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-06 05:02:31 +00:00
mm: Avoid modifying vmf.address in __collapse_huge_page_swapin()
In preparation for const-ifying the anonymous struct field of 'struct vm_fault', rework __collapse_huge_page_swapin() to avoid continuously updating vmf.address; instead, populate a new 'struct vm_fault' on the stack for each page being processed.

Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
9d3af4b448
commit
2b635dd372
@ -991,38 +991,41 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
|
||||
|
||||
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmd,
|
||||
unsigned long haddr, pmd_t *pmd,
|
||||
int referenced)
|
||||
{
|
||||
int swapped_in = 0;
|
||||
vm_fault_t ret = 0;
|
||||
struct vm_fault vmf = {
|
||||
.vma = vma,
|
||||
.address = address,
|
||||
.flags = FAULT_FLAG_ALLOW_RETRY,
|
||||
.pmd = pmd,
|
||||
.pgoff = linear_page_index(vma, address),
|
||||
};
|
||||
unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
|
||||
|
||||
vmf.pte = pte_offset_map(pmd, address);
|
||||
for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
|
||||
vmf.pte++, vmf.address += PAGE_SIZE) {
|
||||
for (address = haddr; address < end; address += PAGE_SIZE) {
|
||||
struct vm_fault vmf = {
|
||||
.vma = vma,
|
||||
.address = address,
|
||||
.pgoff = linear_page_index(vma, haddr),
|
||||
.flags = FAULT_FLAG_ALLOW_RETRY,
|
||||
.pmd = pmd,
|
||||
};
|
||||
|
||||
vmf.pte = pte_offset_map(pmd, address);
|
||||
vmf.orig_pte = *vmf.pte;
|
||||
if (!is_swap_pte(vmf.orig_pte))
|
||||
if (!is_swap_pte(vmf.orig_pte)) {
|
||||
pte_unmap(vmf.pte);
|
||||
continue;
|
||||
}
|
||||
swapped_in++;
|
||||
ret = do_swap_page(&vmf);
|
||||
|
||||
/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
|
||||
if (ret & VM_FAULT_RETRY) {
|
||||
mmap_read_lock(mm);
|
||||
if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
|
||||
if (hugepage_vma_revalidate(mm, haddr, &vma)) {
|
||||
/* vma is no longer available, don't continue to swapin */
|
||||
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
|
||||
return false;
|
||||
}
|
||||
/* check if the pmd is still valid */
|
||||
if (mm_find_pmd(mm, address) != pmd) {
|
||||
if (mm_find_pmd(mm, haddr) != pmd) {
|
||||
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
|
||||
return false;
|
||||
}
|
||||
@ -1031,11 +1034,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
|
||||
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
|
||||
return false;
|
||||
}
|
||||
/* pte is unmapped now, we need to map it */
|
||||
vmf.pte = pte_offset_map(pmd, vmf.address);
|
||||
}
|
||||
vmf.pte--;
|
||||
pte_unmap(vmf.pte);
|
||||
|
||||
/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
|
||||
if (swapped_in)
|
||||
|
Loading…
Reference in New Issue
Block a user