Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm/vma: extract the gathering of vmas from do_vmi_align_munmap()
Create vmi_gather_munmap_vmas() to handle the gathering of vmas into a
detached maple tree for removal later.  Part of the gathering is the
splitting of vmas that span the boundary.

Link: https://lkml.kernel.org/r/20240830040101.822209-5-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
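For orientation, here is a condensed sketch of the control flow after this
patch. It paraphrases the diff below rather than reproducing it: all
identifiers come from mm/vma.c as changed here, but the error labels, debug
checks, and on-stack tree setup (mt_init_flags()/mt_on_stack()) are elided
and the unwind path is simplified.

/*
 * Condensed sketch, not the literal patch: do_vmi_align_munmap() keeps
 * ownership of the on-stack maple tree and delegates the gather/split
 * step to the new helper before committing the removal.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;			/* detached vmas live here */
	MA_STATE(mas_detach, &mt_detach, 0, 0);		/* maple state over it */
	unsigned long locked_vm = 0;			/* VM_LOCKED page count */
	int error;

	/* Split the boundary vmas and collect the range into mas_detach. */
	error = vmi_gather_munmap_vmas(vmi, vma, mm, start, end, uf,
				       &mas_detach, &locked_vm);
	if (error)
		return error;	/* simplified: the real code validates mm first */

	/* Remove the range from the vma tree. */
	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		return error;	/* simplified: the real code aborts the gather */

	/* Point of no return */
	vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
				 locked_vm);
	return 0;
}

The value of the split is visible here: every failure can still be unwound
with abort_munmap_vmas() while the mm is intact, and only after the range is
cleared does vmi_complete_munmap_vmas() finish the removal unconditionally.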
commit 6898c9039b
parent 01cf21e9e1
mm/vma.c | 95
@@ -737,32 +737,30 @@ vmi_complete_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
 }
 
 /*
- * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * vmi_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
+ * for removal at a later date. Handles splitting first and last if necessary
+ * and marking the vmas as isolated.
+ *
  * @vmi: The vma iterator
  * @vma: The starting vm_area_struct
  * @mm: The mm_struct
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
- * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
- * success.
+ * @mas_detach: The maple state tracking the detached tree
+ * @locked_vm: a pointer to store the VM_LOCKED pages count.
  *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
+ * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
  */
-int
-do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+static int
+vmi_gather_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		struct mm_struct *mm, unsigned long start,
-		unsigned long end, struct list_head *uf, bool unlock)
+		unsigned long end, struct list_head *uf,
+		struct ma_state *mas_detach, unsigned long *locked_vm)
 {
 	struct vm_area_struct *next = NULL;
-	struct maple_tree mt_detach;
 	int count = 0;
 	int error = -ENOMEM;
-	unsigned long locked_vm = 0;
-	MA_STATE(mas_detach, &mt_detach, 0, 0);
-	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
-	mt_on_stack(mt_detach);
 
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -789,8 +787,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 			goto start_split_failed;
 		}
 
-		error = __split_vma(vmi, vma, start, 1);
-		if (error)
+		if (__split_vma(vmi, vma, start, 1))
 			goto start_split_failed;
 	}
 
@@ -807,20 +804,18 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 		/* Does it split the end? */
 		if (next->vm_end > end) {
-			error = __split_vma(vmi, next, end, 0);
-			if (error)
+			if (__split_vma(vmi, next, end, 0))
 				goto end_split_failed;
 		}
 		vma_start_write(next);
-		mas_set(&mas_detach, count);
-		error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
-		if (error)
+		mas_set(mas_detach, count++);
+		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
 			goto munmap_gather_failed;
 
 		vma_mark_detached(next, true);
 		if (next->vm_flags & VM_LOCKED)
-			locked_vm += vma_pages(next);
+			*locked_vm += vma_pages(next);
 
-		count++;
 		if (unlikely(uf)) {
 			/*
 			 * If userfaultfd_unmap_prep returns an error the vmas
@@ -831,9 +826,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 			 * split, despite we could.  This is unlikely enough
 			 * failure that it's not worth optimizing it for.
 			 */
-			error = userfaultfd_unmap_prep(next, start, end, uf);
-
-			if (error)
+			if (userfaultfd_unmap_prep(next, start, end, uf))
 				goto userfaultfd_error;
 		}
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -845,7 +838,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
 	{
-		MA_STATE(test, &mt_detach, 0, 0);
+		MA_STATE(test, mas_detach->tree, 0, 0);
 		struct vm_area_struct *vma_mas, *vma_test;
 		int test_count = 0;
 
@@ -865,6 +858,48 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	while (vma_iter_addr(vmi) > start)
 		vma_iter_prev_range(vmi);
 
+	return 0;
+
+userfaultfd_error:
+munmap_gather_failed:
+end_split_failed:
+modify_vma_failed:
+	abort_munmap_vmas(mas_detach);
+start_split_failed:
+map_count_exceeded:
+	return error;
+}
+
+/*
+ * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * @vmi: The vma iterator
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
+ * success.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+		struct mm_struct *mm, unsigned long start, unsigned long end,
+		struct list_head *uf, bool unlock)
+{
+	struct maple_tree mt_detach;
+	MA_STATE(mas_detach, &mt_detach, 0, 0);
+	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+	mt_on_stack(mt_detach);
+	int error;
+	unsigned long locked_vm = 0;
+
+	error = vmi_gather_munmap_vmas(vmi, vma, mm, start, end, uf,
+			&mas_detach, &locked_vm);
+	if (error)
+		goto gather_failed;
+
 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
 	if (error)
 		goto clear_tree_failed;
@@ -872,17 +907,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	/* Point of no return */
 	vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
 			locked_vm);
 
 	return 0;
 
-modify_vma_failed:
 clear_tree_failed:
-userfaultfd_error:
-munmap_gather_failed:
-end_split_failed:
 	abort_munmap_vmas(&mas_detach);
-start_split_failed:
-map_count_exceeded:
+gather_failed:
 	validate_mm(mm);
 	return error;
 }