io_uring: use region api for SQ

Convert internal parts of the SQ management to the region API.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1fb73ced6b835cb319ab0fe1dc0b2e982a9a5650.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
4 changed files with 34 additions and 47 deletions
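Both creation sites converted below (ring setup and resize) follow the same shape: describe the SQE backing memory in an io_uring_region_desc, let io_create_region() (or its mmap-safe variant, used in the resize path) allocate or pin it, then take the kernel-side pointer from the region. A condensed sketch of that shape, assembled from the hunks below — the helper name is illustrative, not part of the patch:

	/* Illustrative sketch only; condenses the pattern the hunks below apply. */
	static int sq_region_create_sketch(struct io_ring_ctx *ctx,
					   struct io_uring_params *p, size_t size)
	{
		struct io_uring_region_desc rd;
		int ret;

		memset(&rd, 0, sizeof(rd));
		rd.size = PAGE_ALIGN(size);
		if (ctx->flags & IORING_SETUP_NO_MMAP) {
			/* SQEs live in user-provided memory; the region pins those pages */
			rd.user_addr = p->sq_off.user_addr;
			rd.flags |= IORING_MEM_REGION_TYPE_USER;
		}
		ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
		if (ret)
			return ret;
		/* kernel-side mapping of the SQE array */
		ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
		return 0;
	}

Teardown becomes the matching single call, io_free_region(), regardless of whether the pages were kernel-allocated or user-provided.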

include/linux/io_uring_types.h

@@ -432,10 +432,9 @@ struct io_ring_ctx {
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short			n_ring_pages;
-	unsigned short			n_sqe_pages;
	struct page			**ring_pages;
-	struct page			**sqe_pages;
+	struct io_mapped_region		sq_region;

	/* used for optimised request parameter and wait argument passing */
	struct io_mapped_region		param_region;
 };
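For reference, struct io_mapped_region (declared earlier in this header as of this series) bundles the page array, kernel mapping, and flags that the removed per-SQ fields tracked by hand — roughly:

	/* As of this series; shown for reference, see io_uring_types.h. */
	struct io_mapped_region {
		struct page		**pages;
		void			*ptr;
		unsigned		nr_pages;
		unsigned		flags;
	};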

io_uring/io_uring.c

@@ -2637,29 +2637,19 @@ static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
			      size);
 }

-static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
-			 size_t size)
-{
-	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
-			      size);
-}
-
 static void io_rings_free(struct io_ring_ctx *ctx)
 {
	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
		io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
				true);
-		io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
-				true);
	} else {
		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
		ctx->n_ring_pages = 0;
-		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
-		ctx->n_sqe_pages = 0;
		vunmap(ctx->rings);
-		vunmap(ctx->sq_sqes);
	}
+	io_free_region(ctx, &ctx->sq_region);

	ctx->rings = NULL;
	ctx->sq_sqes = NULL;
 }
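Read as a whole, the hunk leaves the page-based paths only for the rings themselves; SQE teardown no longer depends on how the memory was mapped. Reassembled from the context and added lines above, io_rings_free() now reads:

	static void io_rings_free(struct io_ring_ctx *ctx)
	{
		if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
			io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
					true);
		} else {
			io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
			ctx->n_ring_pages = 0;
			vunmap(ctx->rings);
		}
		/* single call covers kernel-allocated and user-provided SQE memory */
		io_free_region(ctx, &ctx->sq_region);

		ctx->rings = NULL;
		ctx->sq_sqes = NULL;
	}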
@@ -3476,9 +3466,10 @@ bool io_is_uring_fops(struct file *file)
 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
					 struct io_uring_params *p)
 {
+	struct io_uring_region_desc rd;
	struct io_rings *rings;
	size_t size, sq_array_offset;
-	void *ptr;
+	int ret;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
@@ -3514,17 +3505,18 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
		return -EOVERFLOW;
	}

-	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
-	else
-		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
-
-	if (IS_ERR(ptr)) {
-		io_rings_free(ctx);
-		return PTR_ERR(ptr);
+	memset(&rd, 0, sizeof(rd));
+	rd.size = PAGE_ALIGN(size);
+	if (ctx->flags & IORING_SETUP_NO_MMAP) {
+		rd.user_addr = p->sq_off.user_addr;
+		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
-	ctx->sq_sqes = ptr;
+	ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
+	if (ret) {
+		io_rings_free(ctx);
+		return ret;
+	}
+	ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);

	return 0;
 }

io_uring/memmap.c

@@ -474,8 +474,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
	case IORING_OFF_SQES:
-		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
-					   ctx->n_sqe_pages);
+		return io_region_mmap(ctx, &ctx->sq_region, vma);
	case IORING_OFF_PBUF_RING:
		return io_pbuf_mmap(file, vma);
	case IORING_MAP_OFF_PARAM_REGION:
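For context, this is the path userspace hits when it maps the SQE array during ring setup (unless IORING_SETUP_NO_MMAP is used, in which case userspace supplied the memory itself); the region now backs that mapping, but the userspace side is unchanged. Standard setup code, where ring_fd is the io_uring_setup() return value and p the filled-in params:

	/* Userspace side (unchanged by this patch): map the SQE array. */
	struct io_uring_sqe *sqes;

	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		    ring_fd, IORING_OFF_SQES);
	if (sqes == MAP_FAILED)
		/* handle error */;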

io_uring/register.c

@@ -368,11 +368,11 @@ static int io_register_clock(struct io_ring_ctx *ctx,
  */
 struct io_ring_ctx_rings {
	unsigned short		n_ring_pages;
-	unsigned short		n_sqe_pages;
	struct page		**ring_pages;
-	struct page		**sqe_pages;

-	struct io_uring_sqe	*sq_sqes;
	struct io_rings		*rings;
+	struct io_uring_sqe	*sq_sqes;
+	struct io_mapped_region	sq_region;
 };

 static void io_register_free_rings(struct io_ring_ctx *ctx,
@@ -382,14 +382,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
	if (!(p->flags & IORING_SETUP_NO_MMAP)) {
		io_pages_unmap(r->rings, &r->ring_pages, &r->n_ring_pages,
				true);
-		io_pages_unmap(r->sq_sqes, &r->sqe_pages, &r->n_sqe_pages,
-				true);
	} else {
		io_pages_free(&r->ring_pages, r->n_ring_pages);
-		io_pages_free(&r->sqe_pages, r->n_sqe_pages);
		vunmap(r->rings);
-		vunmap(r->sq_sqes);
	}
+	io_free_region(ctx, &r->sq_region);
 }

#define swap_old(ctx, o, n, field) \
@@ -404,11 +401,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
 static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 {
+	struct io_uring_region_desc rd;
	struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
	size_t size, sq_array_offset;
	struct io_uring_params p;
	unsigned i, tail;
-	void *ptr;
	int ret;

	/* for single issuer, must be owner resizing */
@@ -466,16 +463,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
		return -EOVERFLOW;
	}

-	if (!(p.flags & IORING_SETUP_NO_MMAP))
-		ptr = io_pages_map(&n.sqe_pages, &n.n_sqe_pages, size);
-	else
-		ptr = __io_uaddr_map(&n.sqe_pages, &n.n_sqe_pages,
-				     p.sq_off.user_addr,
-				     size);
-	if (IS_ERR(ptr)) {
-		io_register_free_rings(ctx, &p, &n);
-		return PTR_ERR(ptr);
+	memset(&rd, 0, sizeof(rd));
+	rd.size = PAGE_ALIGN(size);
+	if (p.flags & IORING_SETUP_NO_MMAP) {
+		rd.user_addr = p.sq_off.user_addr;
+		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
+	ret = io_create_region_mmap_safe(ctx, &n.sq_region, &rd, IORING_OFF_SQES);
+	if (ret) {
+		io_register_free_rings(ctx, &p, &n);
+		return ret;
+	}
+	n.sq_sqes = io_region_get_ptr(&n.sq_region);

	/*
	 * If using SQPOLL, park the thread
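Note the resize path uses io_create_region_mmap_safe() rather than the plain io_create_region() seen at ring setup: unlike setup, resize runs while userspace can concurrently mmap() the ring fd. A minimal sketch of the idea, assuming a region built in a private copy and published under the same lock the mmap path takes (illustrative, not the kernel implementation):

	static int create_region_mmap_safe_sketch(struct io_ring_ctx *ctx,
						  struct io_mapped_region *mr,
						  struct io_uring_region_desc *rd,
						  unsigned long mmap_offset)
	{
		struct io_mapped_region tmp = {};
		int ret;

		/* build the region without exposing intermediate state */
		ret = io_create_region(ctx, &tmp, rd, mmap_offset);
		if (ret)
			return ret;

		/* publish under the lock serialising with io_uring_mmap() */
		guard(mutex)(&ctx->mmap_lock);
		*mr = tmp;
		return 0;
	}

This way a racing mmap() of IORING_OFF_SQES never observes a half-initialised region.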
@@ -506,7 +505,6 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
	 * Now copy SQ and CQ entries, if any. If either of the destination
	 * rings can't hold what is already there, then fail the operation.
	 */
-	n.sq_sqes = ptr;
	tail = o.rings->sq.tail;
	if (tail - o.rings->sq.head > p.sq_entries)
		goto overflow;
@@ -555,9 +553,8 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
	ctx->rings = n.rings;
	ctx->sq_sqes = n.sq_sqes;
	swap_old(ctx, o, n, n_ring_pages);
-	swap_old(ctx, o, n, n_sqe_pages);
	swap_old(ctx, o, n, ring_pages);
-	swap_old(ctx, o, n, sqe_pages);
+	swap_old(ctx, o, n, sq_region);
	to_free = &o;
	ret = 0;
 out: