mm/mprotect: delete pmd_none_or_clear_bad_unless_trans_huge()
change_pmd_range() had a special pmd_none_or_clear_bad_unless_trans_huge(), required to avoid "bad" choices when setting automatic NUMA hinting under mmap_read_lock(); but most of that is already covered in pte_offset_map() now.  change_pmd_range() just wants a pmd_none() check before wasting time on MMU notifiers, then checks on the read-once _pmd value to work out what's needed for huge cases.  If change_pte_range() returns -EAGAIN to retry when pte_offset_map_lock() fails, nothing more special is needed.

Link: https://lkml.kernel.org/r/725a42a9-91e9-c868-925-e3a5fd40bb4f@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 670ddd8cdc
parent 04dee9e85c
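As a rough illustration of the retry flow this patch relies on (not the kernel code itself), here is a minimal, self-contained C sketch: the PTE-level helper returns -EAGAIN when it fails to map and lock the page table, and the PMD-level loop simply jumps back to its again: label and re-evaluates. Every name here (fake_pte_map_lock(), fake_change_ptes(), fake_change_pmd()) is an invented stand-in; the real functions and the real retry appear in the diff below.

#include <stdio.h>
#include <stdbool.h>

#define EAGAIN 11	/* same value as errno's EAGAIN on Linux; illustrative only */

/* Stand-in for pte_offset_map_lock(): pretend the first attempt races and fails. */
static bool fake_pte_map_lock(int attempt)
{
	return attempt > 0;
}

/* Stand-in for change_pte_range(): a negative return (-EAGAIN) asks the caller to retry. */
static long fake_change_ptes(int attempt)
{
	if (!fake_pte_map_lock(attempt))
		return -EAGAIN;
	return 42;	/* pretend 42 PTEs were updated */
}

/* Stand-in for one iteration of the change_pmd_range() loop: retry on -EAGAIN. */
static long fake_change_pmd(void)
{
	long pages = 0;
	long ret;
	int attempt = 0;
again:
	ret = fake_change_ptes(attempt++);
	if (ret < 0)		/* -EAGAIN: go back and re-examine the pmd */
		goto again;
	pages += ret;
	return pages;
}

int main(void)
{
	printf("pages updated: %ld\n", fake_change_pmd());
	return 0;
}

Because change_pte_range() otherwise returns a non-negative page count, the sign of the return value is enough for the caller to tell "retry" from "done", which is exactly the if (ret < 0) goto again; added at the bottom of the diff.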
@@ -93,22 +93,9 @@ static long change_pte_range(struct mmu_gather *tlb,
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 
 	tlb_change_page_size(tlb, PAGE_SIZE);
-
-	/*
-	 * Can be called with only the mmap_lock for reading by
-	 * prot_numa so we must check the pmd isn't constantly
-	 * changing from under us from pmd_none to pmd_trans_huge
-	 * and/or the other way around.
-	 */
-	if (pmd_trans_unstable(pmd))
-		return 0;
-
-	/*
-	 * The pmd points to a regular pte so the pmd can't change
-	 * from under us even if the mmap_lock is only hold for
-	 * reading.
-	 */
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return -EAGAIN;
 
 	/* Get target node for single threaded private VMAs */
 	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
@@ -301,26 +288,6 @@ static long change_pte_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-/*
- * Used when setting automatic NUMA hinting protection where it is
- * critical that a numa hinting PMD is not confused with a bad PMD.
- */
-static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
-{
-	pmd_t pmdval = pmdp_get_lockless(pmd);
-
-	if (pmd_none(pmdval))
-		return 1;
-	if (pmd_trans_huge(pmdval))
-		return 0;
-	if (unlikely(pmd_bad(pmdval))) {
-		pmd_clear_bad(pmd);
-		return 1;
-	}
-
-	return 0;
-}
-
 /*
  * Return true if we want to split THPs into PTE mappings in change
  * protection procedure, false otherwise.
@@ -398,7 +365,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		long ret;
-
+		pmd_t _pmd;
+again:
 		next = pmd_addr_end(addr, end);
 
 		ret = change_pmd_prepare(vma, pmd, cp_flags);
@@ -406,16 +374,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			pages = ret;
 			break;
 		}
-		/*
-		 * Automatic NUMA balancing walks the tables with mmap_lock
-		 * held for read. It's possible a parallel update to occur
-		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
-		 * check leading to a false positive and clearing.
-		 * Hence, it's necessary to atomically read the PMD value
-		 * for all the checks.
-		 */
-		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
-		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
+
+		if (pmd_none(*pmd))
 			goto next;
 
 		/* invoke the mmu notifier if the pmd is populated */
@@ -426,7 +386,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			mmu_notifier_invalidate_range_start(&range);
 		}
 
-		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+		_pmd = pmdp_get_lockless(pmd);
+		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
 			if ((next - addr != HPAGE_PMD_SIZE) ||
 			    pgtable_split_needed(vma, cp_flags)) {
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
@@ -441,15 +402,10 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 					break;
 				}
 			} else {
-				/*
-				 * change_huge_pmd() does not defer TLB flushes,
-				 * so no need to propagate the tlb argument.
-				 */
-				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
+				ret = change_huge_pmd(tlb, vma, pmd,
 						addr, newprot, cp_flags);
-
-				if (nr_ptes) {
-					if (nr_ptes == HPAGE_PMD_NR) {
+				if (ret) {
+					if (ret == HPAGE_PMD_NR) {
 						pages += HPAGE_PMD_NR;
 						nr_huge_updates++;
 					}
@@ -460,8 +416,12 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 			}
 			/* fall through, the trans huge pmd just split */
 		}
-		pages += change_pte_range(tlb, vma, pmd, addr, next,
-					  newprot, cp_flags);
+
+		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
+				       cp_flags);
+		if (ret < 0)
+			goto again;
+		pages += ret;
 next:
 		cond_resched();
 	} while (pmd++, addr = next, addr != end);
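The other half of the change is the "read once, then decide" handling of the huge cases: change_pmd_range() now takes a single pmdp_get_lockless() snapshot into _pmd and performs all of its is_swap_pmd()/pmd_trans_huge()/pmd_devmap() checks on that snapshot. Below is a small stand-alone sketch of that general pattern, taking nothing from the kernel: shared_entry, ENTRY_HUGE_BIT and handle_entry() are invented for illustration only.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long shared_entry;	/* stands in for a live pmd slot */

#define ENTRY_NONE	0UL
#define ENTRY_HUGE_BIT	(1UL << 0)

static void handle_entry(void)
{
	/* Snapshot once; a concurrent writer may change shared_entry at any time. */
	unsigned long snap = atomic_load(&shared_entry);

	if (snap == ENTRY_NONE) {
		printf("nothing mapped, skip\n");
		return;
	}
	/* Every later decision uses snap, never a fresh re-read that could differ. */
	if (snap & ENTRY_HUGE_BIT)
		printf("huge case: 0x%lx\n", snap);
	else
		printf("pte table case: 0x%lx\n", snap);
}

int main(void)
{
	atomic_store(&shared_entry, ENTRY_HUGE_BIT | 0x1000UL);
	handle_entry();
	return 0;
}

The point of the snapshot is that two consecutive checks can never disagree because of a concurrent update; at worst the whole decision is made against a slightly stale value, and the -EAGAIN retry shown earlier copes with that.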