mm/mprotect: push mmu notifier to PUDs
mprotect() does mmu notifiers at the PMD level.  It has been that way since
2014, commit a5338093bf ("mm: move mmu notifier call from change_protection
to change_pmd_range").

At that time, the issue was that NUMA balancing could be applied to a huge
range of VM memory even if nothing was populated.  The notification can be
skipped in that case when no valid pmd is detected, meaning neither a THP
nor a PTE pgtable page is present.

Now, to pave the way for PUD handling, this isn't enough: mmu notifications
need to be generated properly for PUD entries as well.  mprotect() is
currently broken on PUDs (e.g., one can easily trigger a kernel error with
dax 1G mappings already); this is the start of fixing it.

To fix that, this patch pushes such notifications up to the PUD level.

There is a risk of regressing the problem Rik wanted to resolve before, but
I think it shouldn't really happen, and I still chose this solution for a
few reasons:

1) Consider a large VM that should definitely contain more than GBs of
   memory: it's highly likely that its PUDs are also none.  In that case
   there will be no regression.

2) KVM has evolved a lot over the years to get rid of rmap walks, which
   might have been the major cause of the previous soft lockup.  At least
   the TDP MMU already got rid of rmap as long as not nested (which should
   be the major use case, IIUC), so the TDP MMU pgtable walker will simply
   see an empty VM pgtable (e.g., EPT on x86); invalidating a fully empty
   region should in most cases be pretty fast now, compared to 2014.

3) KVM now has explicit code paths to give way to mmu notifiers just like
   this one, e.g., commit d02c357e5b ("KVM: x86/mmu: Retry fault before
   acquiring mmu_lock if mapping is changing").  That also avoids
   contention that may contribute to a soft lockup.

4) Sticking with the PMD level simply doesn't work when PUDs are there...
   We need one way or another to fix PUD mappings on mprotect().

Pushing it to the PUD level should be the safest approach as of now, e.g.,
there is no sign yet of huge P4Ds coming on any known arch.

Link: https://lkml.kernel.org/r/20240812181225.1360970-3-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Edgecombe, Rick P" <rick.p.edgecombe@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 7f06e3aa2e
parent 5b198b4759
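
The commit message notes that dax 1G mappings could already trigger a kernel
error through this path.  As illustration only, here is a minimal userspace
sketch of that scenario; the device node /dev/dax0.0 and the assumption that
it is configured with 1G alignment (e.g., via daxctl) are illustrative and
not part of the patch, and the exact failure mode depends on kernel version.

/*
 * Hypothetical reproducer sketch: change protection across a 1G
 * device-dax mapping, exercising change_pud_range().  Assumes
 * /dev/dax0.0 exists and is configured with 1G alignment; adjust
 * for the local setup.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 1UL << 30;		/* one PUD-sized (1G) mapping */
	int fd = open("/dev/dax0.0", O_RDWR);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The device-dax driver tries to pick a suitably aligned address
	 * so a single PUD entry can back the whole mapping. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Fault the mapping in, then change protections: mprotect()
	 * walks change_pud_range(), which with this patch issues the
	 * mmu notifier at the PUD level. */
	*(volatile char *)p = 0;
	if (mprotect(p, len, PROT_READ))
		perror("mprotect");

	munmap(p, len);
	close(fd);
	return 0;
}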
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -363,9 +363,6 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 	unsigned long next;
 	long pages = 0;
 	unsigned long nr_huge_updates = 0;
-	struct mmu_notifier_range range;
-
-	range.start = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -383,14 +380,6 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 		if (pmd_none(*pmd))
 			goto next;
 
-		/* invoke the mmu notifier if the pmd is populated */
-		if (!range.start) {
-			mmu_notifier_range_init(&range,
-				MMU_NOTIFY_PROTECTION_VMA, 0,
-				vma->vm_mm, addr, end);
-			mmu_notifier_invalidate_range_start(&range);
-		}
-
 		_pmd = pmdp_get_lockless(pmd);
 		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
 			if ((next - addr != HPAGE_PMD_SIZE) ||
@@ -431,9 +420,6 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
 		cond_resched();
 	} while (pmd++, addr = next, addr != end);
 
-	if (range.start)
-		mmu_notifier_invalidate_range_end(&range);
-
 	if (nr_huge_updates)
 		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
 	return pages;
@@ -443,22 +429,36 @@ static inline long change_pud_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
+	struct mmu_notifier_range range;
 	pud_t *pud;
 	unsigned long next;
 	long pages = 0, ret;
 
+	range.start = 0;
+
 	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
-		if (ret)
-			return ret;
+		if (ret) {
+			pages = ret;
+			break;
+		}
 		if (pud_none_or_clear_bad(pud))
 			continue;
+		if (!range.start) {
+			mmu_notifier_range_init(&range,
+						MMU_NOTIFY_PROTECTION_VMA, 0,
+						vma->vm_mm, addr, end);
+			mmu_notifier_invalidate_range_start(&range);
+		}
 		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
 					  cp_flags);
 	} while (pud++, addr = next, addr != end);
 
+	if (range.start)
+		mmu_notifier_invalidate_range_end(&range);
+
 	return pages;
 }
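
For readability, here is change_pud_range() as it reads with the hunk above
applied, assembled from the diff's context and added lines; one comment is
added here to flag the lazy notifier start and is not part of the patch.

static inline long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mmu_notifier_range range;
	pud_t *pud;
	unsigned long next;
	long pages = 0, ret;

	range.start = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		/* Start the notifier lazily, only once a populated PUD is seen. */
		if (!range.start) {
			mmu_notifier_range_init(&range,
						MMU_NOTIFY_PROTECTION_VMA, 0,
						vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	return pages;
}

As in the PMD-level code being removed, range.start doubles as a "notifier
started" flag, so a walk that finds only empty PUDs never issues an
invalidation at all.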