mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-28 16:56:26 +00:00
9 hotfixes. 6 for MM, 3 for other areas. Four of these patches address
post-6.0 issues.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY5Ur2AAKCRDdBJ7gKXxA
jsGmAQDWSq6z9fVgk30XpMr/X7t5c6NTPw5GocVpdwG8iqch3gEAjEs5/Kcd/mx4
d1dLaJFu1u3syessp8nJrNr1HANIog8=
=L8zu
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Nine hotfixes. Six for MM, three for other areas. Four of these
  patches address post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  memcg: fix possible use-after-free in memcg_write_event_control()
  MAINTAINERS: update Muchun Song's email
  mm/gup: fix gup_pud_range() for dax
  mmap: fix do_brk_flags() modifying obviously incorrect VMAs
  mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
  tmpfs: fix data loss from failed fallocate
  kselftests: cgroup: update kmem test precision tolerance
  mm: do not BUG_ON missing brk mapping, because userspace can unmap it
  mailmap: update Matti Vaittinen's email address
This commit is contained in:
commit 4cee37b3a4
.mailmap | 3 +++

@@ -287,6 +287,7 @@ Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu CASTET <castet.matthieu@free.fr>
+Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
@@ -372,6 +373,8 @@ Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
+Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
MAINTAINERS | 4 ++--

@@ -5299,7 +5299,7 @@ M: Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
 M:	Roman Gushchin <roman.gushchin@linux.dev>
 M:	Shakeel Butt <shakeelb@google.com>
-R:	Muchun Song <songmuchun@bytedance.com>
+R:	Muchun Song <muchun.song@linux.dev>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -9439,7 +9439,7 @@ F: drivers/net/ethernet/huawei/hinic/
 
 HUGETLB SUBSYSTEM
 M:	Mike Kravetz <mike.kravetz@oracle.com>
-M:	Muchun Song <songmuchun@bytedance.com>
+M:	Muchun Song <muchun.song@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
include/linux/swapops.h | 4 +++-

@@ -33,11 +33,13 @@
  * can use the extra bits to store other information besides PFN.
  */
 #ifdef MAX_PHYSMEM_BITS
 #define SWP_PFN_BITS	(MAX_PHYSMEM_BITS - PAGE_SHIFT)
 #else  /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS	(BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS	min_t(int, \
+			      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+			      SWP_TYPE_SHIFT)
 #endif	/* MAX_PHYSMEM_BITS */
 #define SWP_PFN_MASK	(BIT(SWP_PFN_BITS) - 1)
 
 /**
  * Migration swap entry specific bitfield definitions. Layout:
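Note on the SWP_PFN_BITS change: on a 32-bit kernel with CONFIG_PHYS_ADDR_T_64BIT (e.g. x86 PAE), phys_addr_t is 64-bit while unsigned long is 32-bit, so sizing the swap-entry PFN field from BITS_PER_LONG wrongly capped encodable PFNs at 4 GiB of physical address space; the fix sizes the field from phys_addr_t and clamps it to SWP_TYPE_SHIFT so the entry still fits. A standalone sketch of the arithmetic, with illustrative values (SWP_TYPE_SHIFT here is a made-up budget, not a real arch value):

#include <stdio.h>

/* Illustrative 32-bit PAE-like configuration; these constants are
 * assumptions for the sketch, not taken from a real kernel config. */
#define BITS_PER_LONG   32
#define PAGE_SHIFT      12
#define PHYS_ADDR_BITS  64      /* sizeof(phys_addr_t) * 8 under CONFIG_PHYS_ADDR_T_64BIT */
#define SWP_TYPE_SHIFT  27      /* hypothetical budget left for the offset field */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int old_bits = BITS_PER_LONG - PAGE_SHIFT;
	int new_bits = MIN(PHYS_ADDR_BITS - PAGE_SHIFT, SWP_TYPE_SHIFT);

	/* old: 20 bits of PFN -> at most 4 GiB of physical address space */
	printf("old SWP_PFN_BITS=%d, max addressable=%llu GiB\n", old_bits,
	       (1ULL << (old_bits + PAGE_SHIFT)) >> 30);
	/* new: sized from phys_addr_t, clamped so the entry still fits */
	printf("new SWP_PFN_BITS=%d\n", new_bits);
	return 0;
}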
mm/gup.c | 2 +-

@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
 		next = pud_addr_end(addr, end);
 		if (unlikely(!pud_present(pud)))
 			return 0;
-		if (unlikely(pud_huge(pud))) {
+		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
 					  pages, nr))
 				return 0;
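Note on the gup_pud_range() change: a device-DAX PUD-sized mapping is pud_devmap() but not pud_huge(), so the lockless fast path fell through and misread the leaf entry as a page-table pointer. A toy model of the corrected dispatch (the struct and predicates are stand-ins for the kernel's, not real API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; a real pud_t and its predicates are arch-specific. */
struct pud { bool present, huge, devmap; };

static int gup_huge_pud_stub(void)
{
	return 1;	/* would pin the folio backing the PUD-sized leaf */
}

static int walk_pud(const struct pud *pud)
{
	if (!pud->present)
		return 0;
	/* the pre-fix check was pud->huge only: a device-DAX leaf
	 * (devmap, not huge) fell through to the descend path */
	if (pud->huge || pud->devmap)
		return gup_huge_pud_stub();
	return 2;	/* descend to the PMD level */
}

int main(void)
{
	struct pud dax = { .present = true, .huge = false, .devmap = true };
	printf("device-DAX PUD handled as leaf: %s\n",
	       walk_pud(&dax) == 1 ? "yes" : "no");
	return 0;
}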
mm/mmap.c | 14 ++++----------

@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 		/* Search one past newbrk */
 		mas_set(&mas, newbrk);
 		brkvma = mas_find(&mas, oldbrk);
-		BUG_ON(brkvma == NULL);
-		if (brkvma->vm_start >= oldbrk)
+		if (!brkvma || brkvma->vm_start >= oldbrk)
 			goto out; /* mapping intersects with an existing non-brk vma. */
 		/*
 		 * mm->brk must be protected by write mmap_lock.
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	 * Expand the existing vma if possible; Note that singular lists do not
 	 * occur after forking, so the expand will only happen on new VMAs.
 	 */
-	if (vma &&
-	    (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-	    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+	    can_vma_merge_after(vma, flags, NULL, NULL,
+				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
 		mas_set_range(mas, vma->vm_start, addr + len - 1);
 		if (mas_preallocate(mas, vma, GFP_KERNEL))
 			return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 		goto munmap_failed;
 
 	vma = mas_prev(&mas, 0);
-	if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-	    !can_vma_merge_after(vma, flags, NULL, NULL,
-				 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-		vma = NULL;
-
 	ret = do_brk_flags(&mas, vma, addr, len, flags);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	mmap_write_unlock(mm);
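Note on the brk changes: the removed BUG_ON() assumed a brk VMA always exists, but userspace may legally munmap() its own heap, so the kernel must tolerate finding nothing. A minimal userspace sketch of that sequence (page handling simplified; not a guaranteed reproducer):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	uintptr_t start = (uintptr_t)sbrk(0);
	uintptr_t page = (start + psz - 1) & ~((uintptr_t)psz - 1);

	sbrk(2 * psz);			/* grow the heap */
	munmap((void *)page, psz);	/* legally unmap part of the brk area */
	brk((void *)start);		/* shrink: the kernel's VMA search may find nothing */
	printf("still alive after shrinking an unmapped brk\n");
	return 0;
}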
mm/shmem.c | 11 +++++++++++

@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			index++;
 		}
 
+	/*
+	 * When undoing a failed fallocate, we want none of the partial folio
+	 * zeroing and splitting below, but shall want to truncate the whole
+	 * folio when !uptodate indicates that it was added by this fallocate,
+	 * even when [lstart, lend] covers only a part of the folio.
+	 */
+	if (unfalloc)
+		goto whole_folios;
+
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			folio_put(folio);
 		}
 
+whole_folios:
+
 	index = start;
 	while (index < end) {
 		cond_resched();
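Note on the shmem change: when undoing a failed fallocate, shmem_undo_range() now skips straight to truncating whole !uptodate folios instead of zeroing and splitting partial folios, which could discard previously written data that shared a large folio with the failed range. A hedged userspace sketch of the invariant being protected (the path and the oversized length are illustrative; the original bug needs huge folios on tmpfs to actually trigger):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[9] = { 0 };
	int fd = open("/dev/shm/falloc-test", O_RDWR | O_CREAT | O_TRUNC, 0600);

	pwrite(fd, "precious", 8, 0);		/* uptodate data on tmpfs */
	if (fallocate(fd, 0, 0, 1LL << 42) < 0)	/* oversized on purpose: should fail */
		perror("fallocate");
	pread(fd, buf, 8, 0);
	printf("after failed fallocate: \"%s\"\n", buf);	/* must still be "precious" */
	close(fd);
	unlink("/dev/shm/falloc-test");
	return 0;
}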
tools/testing/selftests/cgroup/test_kmem.c | 6 +++---

@@ -19,12 +19,12 @@
 
 
 /*
- * Memory cgroup charging is performed using percpu batches 32 pages
+ * Memory cgroup charging is performed using percpu batches 64 pages
  * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
  * the maximum discrepancy between charge and vmstat entries is number
- * of cpus multiplied by 32 pages.
+ * of cpus multiplied by 64 pages.
  */
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
 
 
 static int alloc_dcache(const char *cgroup, void *arg)
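Note on the tolerance update: MEMCG_CHARGE_BATCH grew from 32 to 64 pages, so the worst-case drift between charges and vmstat is page size times batch size times CPU count. A quick standalone check of what the test now tolerates (same formula as the test; output values depend on the machine):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/sysinfo.h>

/* page size (4096) * MEMCG_CHARGE_BATCH (64 pages) * online CPUs */
#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())

int main(void)
{
	printf("cpus=%d, tolerance=%d bytes (%.1f MiB)\n",
	       get_nprocs(), MAX_VMSTAT_ERROR,
	       MAX_VMSTAT_ERROR / (1024.0 * 1024.0));
	return 0;
}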