Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/huge_memory: remove stale locking logic from __split_huge_pmd()
Let's remove the stale logic that was required for reuse_swap_page().

[akpm@linux-foundation.org: simplification, per Yang Shi]
Link: https://lkml.kernel.org/r/20220131162940.210846-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liang Zhang <zhangliang5@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
55c62fa7c5
commit
7f7609175f
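Why the removed dance existed: as the deleted comment in the diff below notes, reuse_swap_page() required an anonymous page to be locked so that it would see a stable mapcount. Since folio_lock() can sleep, the old code could not take it under the PMD spinlock; it had to drop the spinlock, sleep on the folio lock, retake the spinlock, and revalidate. A minimal annotated sketch of that idiom, paraphrased from the removed hunk (kernel context assumed; not a standalone build):

	/* fast path: try to grab the folio lock without sleeping */
	if (unlikely(!folio_trylock(folio))) {
		folio_get(folio);	/* pin the folio across the unlock */
		_pmd = *pmd;		/* snapshot the PMD for revalidation */
		spin_unlock(ptl);	/* folio_lock() may sleep */
		folio_lock(folio);
		spin_lock(ptl);
		if (unlikely(!pmd_same(*pmd, _pmd))) {
			/* the PMD changed while unlocked: undo and retry */
			folio_unlock(folio);
			folio_put(folio);
			folio = NULL;
			goto repeat;
		}
		folio_put(folio);
	}

With the reuse_swap_page() requirement gone, no caller needs the folio lock held across the split, so the retry loop, the _pmd snapshot, and the do_unlock_folio bookkeeping can all be dropped.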
mm/huge_memory.c
@@ -2133,8 +2133,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool do_unlock_folio = false;
-	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address & HPAGE_PMD_MASK,
@@ -2153,42 +2151,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			goto out;
 	}
 
-repeat:
-	if (pmd_trans_huge(*pmd)) {
-		if (!folio) {
-			folio = page_folio(pmd_page(*pmd));
-			/*
-			 * An anonymous page must be locked, to ensure that a
-			 * concurrent reuse_swap_page() sees stable mapcount;
-			 * but reuse_swap_page() is not used on shmem or file,
-			 * and page lock must not be taken when zap_pmd_range()
-			 * calls __split_huge_pmd() while i_mmap_lock is held.
-			 */
-			if (folio_test_anon(folio)) {
-				if (unlikely(!folio_trylock(folio))) {
-					folio_get(folio);
-					_pmd = *pmd;
-					spin_unlock(ptl);
-					folio_lock(folio);
-					spin_lock(ptl);
-					if (unlikely(!pmd_same(*pmd, _pmd))) {
-						folio_unlock(folio);
-						folio_put(folio);
-						folio = NULL;
-						goto repeat;
-					}
-					folio_put(folio);
-				}
-				do_unlock_folio = true;
-			}
-		}
-	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
-		goto out;
-	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+	    is_pmd_migration_entry(*pmd))
+		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 
 out:
 	spin_unlock(ptl);
-	if (do_unlock_folio)
-		folio_unlock(folio);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.
 	 * They are 3 cases to consider inside __split_huge_pmd_locked():