nommu: convert nommu to using the vma iterator

Gain type safety in nommu by using the vma_iterator and not the maple tree
directly.
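
In practice the conversion is mechanical: each open-coded maple state becomes a
VMA iterator, which carries the mm_struct and deals in vm_area_struct pointers
rather than raw tree entries. As a minimal illustration, the find_vma() lookup
reduces to the following before/after pattern (both forms are taken from the
hunks in this diff):

	/* before: raw maple tree state, typed only by convention */
	MA_STATE(mas, &mm->mm_mt, addr, addr);
	vma = mas_walk(&mas);

	/* after: the iterator wraps the maple state behind a VMA-typed API */
	VMA_ITERATOR(vmi, mm, addr);
	vma = vma_iter_load(&vmi);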

Link: https://lkml.kernel.org/r/20230120162650.984577-28-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Liam R. Howlett, 2023-01-20 11:26:28 -05:00; committed by Andrew Morton
parent a27a11f92f
commit 47d9644de9

@@ -544,19 +544,6 @@ static void put_nommu_region(struct vm_region *region)
 	__put_nommu_region(region);
 }
 
-void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
-	mas_store_prealloc(mas, vma);
-}
-
-void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
-{
-	mas->index = vma->vm_start;
-	mas->last = vma->vm_end - 1;
-	mas_store_prealloc(mas, NULL);
-}
-
 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	vma->vm_mm = mm;
@@ -574,13 +561,13 @@ static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 }
 
 /*
- * mas_add_vma_to_mm() - Maple state variant of add_mas_to_mm().
- * @mas: The maple state with preallocations.
+ * vmi_add_vma_to_mm() - VMA Iterator variant of add_vmi_to_mm().
+ * @vmi: The VMA iterator
  * @mm: The mm_struct
  * @vma: The vma to add
  *
  */
-static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
+static void vmi_add_vma_to_mm(struct vma_iterator *vmi, struct mm_struct *mm,
 			      struct vm_area_struct *vma)
 {
 	BUG_ON(!vma->vm_region);
@@ -589,7 +576,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
 	mm->map_count++;
 
 	/* add the VMA to the tree */
-	vma_mas_store(vma, mas);
+	vma_iter_store(vmi, vma);
 }
 
 /*
@@ -600,14 +587,14 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
  */
 static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
+	VMA_ITERATOR(vmi, mm, vma->vm_start);
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(&vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 		       current->pid);
 		return -ENOMEM;
 	}
-	mas_add_vma_to_mm(&mas, mm, vma);
+	vmi_add_vma_to_mm(&vmi, mm, vma);
 	return 0;
 }
@@ -626,14 +613,15 @@ static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 		i_mmap_unlock_write(mapping);
 	}
 }
+
 /*
  * delete a VMA from its owning mm_struct and address space
 */
 static int delete_vma_from_mm(struct vm_area_struct *vma)
 {
-	MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(&vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 		       current->pid);
 		return -ENOMEM;
@@ -641,10 +629,9 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
 	cleanup_vma_from_mm(vma);
 
 	/* remove from the MM's tree and list */
-	vma_mas_remove(vma, &mas);
+	vma_iter_clear(&vmi, vma->vm_start, vma->vm_end);
 	return 0;
 }
-
 /*
  * destroy a VMA record
  */
@@ -675,9 +662,9 @@ EXPORT_SYMBOL(find_vma_intersection);
  */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
-	MA_STATE(mas, &mm->mm_mt, addr, addr);
+	VMA_ITERATOR(vmi, mm, addr);
 
-	return mas_walk(&mas);
+	return vma_iter_load(&vmi);
 }
 EXPORT_SYMBOL(find_vma);
@@ -709,9 +696,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 {
 	struct vm_area_struct *vma;
 	unsigned long end = addr + len;
-	MA_STATE(mas, &mm->mm_mt, addr, addr);
+	VMA_ITERATOR(vmi, mm, addr);
 
-	vma = mas_walk(&mas);
+	vma = vma_iter_load(&vmi);
 	if (!vma)
 		return NULL;
 	if (vma->vm_start != addr)
@@ -1062,7 +1049,7 @@ unsigned long do_mmap(struct file *file,
 	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
-	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+	VMA_ITERATOR(vmi, current->mm, 0);
 
 	*populate = 0;
@@ -1091,8 +1078,8 @@ unsigned long do_mmap(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	if (mas_preallocate(&mas, GFP_KERNEL))
-		goto error_maple_preallocate;
+	if (vma_iter_prealloc(&vmi))
+		goto error_vma_iter_prealloc;
 
 	region->vm_usage = 1;
 	region->vm_flags = vm_flags;
@@ -1234,7 +1221,7 @@ unsigned long do_mmap(struct file *file,
 	current->mm->total_vm += len >> PAGE_SHIFT;
 
 share:
-	mas_add_vma_to_mm(&mas, current->mm, vma);
+	vmi_add_vma_to_mm(&vmi, current->mm, vma);
 
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
@@ -1250,7 +1237,7 @@ unsigned long do_mmap(struct file *file,
 error_just_free:
 	up_write(&nommu_region_sem);
 error:
-	mas_destroy(&mas);
+	vma_iter_free(&vmi);
 	if (region->vm_file)
 		fput(region->vm_file);
 	kmem_cache_free(vm_region_jar, region);
@@ -1278,7 +1265,7 @@ unsigned long do_mmap(struct file *file,
 	show_free_areas(0, NULL);
 	return -ENOMEM;
 
-error_maple_preallocate:
+error_vma_iter_prealloc:
 	kmem_cache_free(vm_region_jar, region);
 	vm_area_free(vma);
 	pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
@@ -1344,20 +1331,18 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
-int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
-	      unsigned long addr, int new_below)
+int vmi_split_vma(struct vma_iterator *vmi, struct mm_struct *mm,
+		  struct vm_area_struct *vma, unsigned long addr, int new_below)
 {
 	struct vm_area_struct *new;
 	struct vm_region *region;
 	unsigned long npages;
-	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
 
 	/* we're only permitted to split anonymous regions (these should have
 	 * only a single usage on the region) */
 	if (vma->vm_file)
 		return -ENOMEM;
 
-	mm = vma->vm_mm;
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
@@ -1369,10 +1354,10 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!new)
 		goto err_vma_dup;
 
-	if (mas_preallocate(&mas, GFP_KERNEL)) {
+	if (vma_iter_prealloc(vmi)) {
 		pr_warn("Allocation of vma tree for process %d failed\n",
 			current->pid);
-		goto err_mas_preallocate;
+		goto err_vmi_preallocate;
 	}
 
 	/* most fields are the same, copy all, and then fixup */
@@ -1406,13 +1391,11 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	setup_vma_to_mm(vma, mm);
 	setup_vma_to_mm(new, mm);
-	mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
-	mas_store(&mas, vma);
-	vma_mas_store(new, &mas);
+	vma_iter_store(vmi, new);
 	mm->map_count++;
 	return 0;
 
-err_mas_preallocate:
+err_vmi_preallocate:
 	vm_area_free(new);
 err_vma_dup:
 	kmem_cache_free(vm_region_jar, region);
@@ -1466,7 +1449,7 @@ static int shrink_vma(struct mm_struct *mm,
 */
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
 {
-	MA_STATE(mas, &mm->mm_mt, start, start);
+	VMA_ITERATOR(vmi, mm, start);
 	struct vm_area_struct *vma;
 	unsigned long end;
 	int ret = 0;
@@ -1478,7 +1461,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 	end = start + len;
 
 	/* find the first potentially overlapping VMA */
-	vma = mas_find(&mas, end - 1);
+	vma = vma_find(&vmi, end);
 	if (!vma) {
 		static int limit;
 		if (limit < 5) {
@@ -1497,7 +1480,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 				return -EINVAL;
 			if (end == vma->vm_end)
 				goto erase_whole_vma;
-			vma = mas_next(&mas, end - 1);
+			vma = vma_find(&vmi, end);
 		} while (vma);
 		return -EINVAL;
 	} else {
@@ -1511,7 +1494,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 		if (end != vma->vm_end && offset_in_page(end))
 			return -EINVAL;
 		if (start != vma->vm_start && end != vma->vm_end) {
-			ret = split_vma(mm, vma, start, 1);
+			ret = vmi_split_vma(&vmi, mm, vma, start, 1);
 			if (ret < 0)
 				return ret;
 		}