io_uring/memmap: optimise single folio regions

We don't need to vmap if memory is already physically contiguous. There
are two important cases it covers: PAGE_SIZE regions and huge pages.
Use io_check_coalesce_buffer() to get the number of contiguous folios.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d5240af23064a824c29d14d2406f1ae764bf4505.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2024-11-29 13:34:28 +00:00 committed by Jens Axboe
parent aee10f60d2
commit cc2f1a864c

View File

@@ -226,12 +226,31 @@ void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
memset(mr, 0, sizeof(*mr));
}
/*
 * Resolve a kernel-addressable pointer for the region's pages.
 *
 * A region whose pages coalesce into a single folio is physically
 * contiguous, so its direct mapping can be used as-is via
 * page_address() and no vmap is needed.  Otherwise fall back to
 * vmap() and flag the region so io_free_region() knows to vunmap.
 *
 * Returns 0 on success, -ENOMEM if vmap() fails.
 */
static int io_region_init_ptr(struct io_mapped_region *mr)
{
	struct io_imu_folio_data ifd;
	void *vaddr;

	/* Single-folio (e.g. PAGE_SIZE or huge page) regions skip vmap. */
	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd) &&
	    ifd.nr_folios == 1) {
		mr->ptr = page_address(mr->pages[0]);
		return 0;
	}

	vaddr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	mr->ptr = vaddr;
	mr->flags |= IO_REGION_F_VMAP;
	return 0;
}
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
struct io_uring_region_desc *reg)
{
struct page **pages;
int nr_pages, ret;
void *vptr;
u64 end;
if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
@@ -267,13 +286,9 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
mr->pages = pages;
mr->flags |= IO_REGION_F_USER_PROVIDED;
vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
if (!vptr) {
ret = -ENOMEM;
ret = io_region_init_ptr(mr);
if (ret)
goto out_free;
}
mr->ptr = vptr;
mr->flags |= IO_REGION_F_VMAP;
return 0;
out_free:
io_free_region(ctx, mr);