mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
io_uring: use region api for SQ
Convert internal parts of the SQ management to the region API.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1fb73ced6b835cb319ab0fe1dc0b2e982a9a5650.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 55ea1ea1c8
parent 2fdec0d862
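Background note (not part of the commit): the rd.user_addr / IORING_MEM_REGION_TYPE_USER branch in the hunks below covers rings created with IORING_SETUP_NO_MMAP, where the application supplies the SQE memory itself through io_uring_params.sq_off.user_addr. A rough userspace sketch of that setup path follows; the helper name and the huge-page sizing are illustrative assumptions, not taken from this patch.

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Illustrative helper: create a ring whose SQ/CQ rings and SQE array live in
 * caller-allocated memory (IORING_SETUP_NO_MMAP). Older kernels expect this
 * memory to be physically contiguous, e.g. backed by huge pages. */
static int setup_user_memory_ring(unsigned entries)
{
        const size_t len = 2 * 1024 * 1024;     /* assume 2 MiB huge pages */
        struct io_uring_params p;
        void *rings_mem, *sqes_mem;

        rings_mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        sqes_mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (rings_mem == MAP_FAILED || sqes_mem == MAP_FAILED)
                return -1;

        memset(&p, 0, sizeof(p));
        p.flags = IORING_SETUP_NO_MMAP;
        p.cq_off.user_addr = (uint64_t)(uintptr_t)rings_mem; /* SQ/CQ rings */
        p.sq_off.user_addr = (uint64_t)(uintptr_t)sqes_mem;  /* SQE array: becomes rd.user_addr */

        return (int)syscall(__NR_io_uring_setup, entries, &p);
}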
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -432,10 +432,9 @@ struct io_ring_ctx {
 	 * the gup'ed pages for the two rings, and the sqes.
 	 */
 	unsigned short n_ring_pages;
-	unsigned short n_sqe_pages;
 	struct page **ring_pages;
-	struct page **sqe_pages;
 
+	struct io_mapped_region sq_region;
 	/* used for optimised request parameter and wait argument passing */
 	struct io_mapped_region param_region;
 };
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2637,29 +2637,19 @@ static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
 				size);
 }
 
-static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
-			 size_t size)
-{
-	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
-				size);
-}
-
 static void io_rings_free(struct io_ring_ctx *ctx)
 {
 	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
 		io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
 				true);
-		io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
-				true);
 	} else {
 		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
 		ctx->n_ring_pages = 0;
-		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
-		ctx->n_sqe_pages = 0;
 		vunmap(ctx->rings);
-		vunmap(ctx->sq_sqes);
 	}
 
+	io_free_region(ctx, &ctx->sq_region);
+
 	ctx->rings = NULL;
 	ctx->sq_sqes = NULL;
 }
@@ -3476,9 +3466,10 @@ bool io_is_uring_fops(struct file *file)
 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 					 struct io_uring_params *p)
 {
+	struct io_uring_region_desc rd;
 	struct io_rings *rings;
 	size_t size, sq_array_offset;
-	void *ptr;
+	int ret;
 
 	/* make sure these are sane, as we already accounted them */
 	ctx->sq_entries = p->sq_entries;
@@ -3514,17 +3505,18 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 		return -EOVERFLOW;
 	}
 
-	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
-	else
-		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);
-
-	if (IS_ERR(ptr)) {
-		io_rings_free(ctx);
-		return PTR_ERR(ptr);
+	memset(&rd, 0, sizeof(rd));
+	rd.size = PAGE_ALIGN(size);
+	if (ctx->flags & IORING_SETUP_NO_MMAP) {
+		rd.user_addr = p->sq_off.user_addr;
+		rd.flags |= IORING_MEM_REGION_TYPE_USER;
 	}
-
-	ctx->sq_sqes = ptr;
+	ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
+	if (ret) {
+		io_rings_free(ctx);
+		return ret;
+	}
+	ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
 	return 0;
 }
 
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -474,8 +474,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
 		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
 	case IORING_OFF_SQES:
-		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
-						ctx->n_sqe_pages);
+		return io_region_mmap(ctx, &ctx->sq_region, vma);
 	case IORING_OFF_PBUF_RING:
 		return io_pbuf_mmap(file, vma);
 	case IORING_MAP_OFF_PARAM_REGION:
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -368,11 +368,11 @@ static int io_register_clock(struct io_ring_ctx *ctx,
  */
 struct io_ring_ctx_rings {
 	unsigned short n_ring_pages;
-	unsigned short n_sqe_pages;
 	struct page **ring_pages;
-	struct page **sqe_pages;
-	struct io_uring_sqe *sq_sqes;
 	struct io_rings *rings;
+
+	struct io_uring_sqe *sq_sqes;
+	struct io_mapped_region sq_region;
 };
 
 static void io_register_free_rings(struct io_ring_ctx *ctx,
@@ -382,14 +382,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
 	if (!(p->flags & IORING_SETUP_NO_MMAP)) {
 		io_pages_unmap(r->rings, &r->ring_pages, &r->n_ring_pages,
 				true);
-		io_pages_unmap(r->sq_sqes, &r->sqe_pages, &r->n_sqe_pages,
-				true);
 	} else {
 		io_pages_free(&r->ring_pages, r->n_ring_pages);
-		io_pages_free(&r->sqe_pages, r->n_sqe_pages);
 		vunmap(r->rings);
-		vunmap(r->sq_sqes);
 	}
+	io_free_region(ctx, &r->sq_region);
 }
 
 #define swap_old(ctx, o, n, field) \
@@ -404,11 +401,11 @@ static void io_register_free_rings(struct io_ring_ctx *ctx,
 
 static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 {
+	struct io_uring_region_desc rd;
 	struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
 	size_t size, sq_array_offset;
 	struct io_uring_params p;
 	unsigned i, tail;
-	void *ptr;
 	int ret;
 
 	/* for single issuer, must be owner resizing */
@@ -466,16 +463,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 		return -EOVERFLOW;
 	}
 
-	if (!(p.flags & IORING_SETUP_NO_MMAP))
-		ptr = io_pages_map(&n.sqe_pages, &n.n_sqe_pages, size);
-	else
-		ptr = __io_uaddr_map(&n.sqe_pages, &n.n_sqe_pages,
-					p.sq_off.user_addr,
-					size);
-	if (IS_ERR(ptr)) {
-		io_register_free_rings(ctx, &p, &n);
-		return PTR_ERR(ptr);
+	memset(&rd, 0, sizeof(rd));
+	rd.size = PAGE_ALIGN(size);
+	if (p.flags & IORING_SETUP_NO_MMAP) {
+		rd.user_addr = p.sq_off.user_addr;
+		rd.flags |= IORING_MEM_REGION_TYPE_USER;
 	}
+	ret = io_create_region_mmap_safe(ctx, &n.sq_region, &rd, IORING_OFF_SQES);
+	if (ret) {
+		io_register_free_rings(ctx, &p, &n);
+		return ret;
+	}
+	n.sq_sqes = io_region_get_ptr(&n.sq_region);
 
 	/*
 	 * If using SQPOLL, park the thread
@@ -506,7 +505,6 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	 * Now copy SQ and CQ entries, if any. If either of the destination
 	 * rings can't hold what is already there, then fail the operation.
 	 */
-	n.sq_sqes = ptr;
 	tail = o.rings->sq.tail;
 	if (tail - o.rings->sq.head > p.sq_entries)
 		goto overflow;
@@ -555,9 +553,8 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	ctx->rings = n.rings;
 	ctx->sq_sqes = n.sq_sqes;
 	swap_old(ctx, o, n, n_ring_pages);
-	swap_old(ctx, o, n, n_sqe_pages);
 	swap_old(ctx, o, n, ring_pages);
-	swap_old(ctx, o, n, sqe_pages);
+	swap_old(ctx, o, n, sq_region);
 	to_free = &o;
 	ret = 0;
 out:
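Taken together, the hunks above collapse the separate sqe_pages/n_sqe_pages bookkeeping into a single io_mapped_region. A condensed sketch of the resulting SQ setup, assembled from the diff (the wrapper function name is illustrative; mmap of IORING_OFF_SQES is then served by io_region_mmap() and teardown by io_free_region()):

/* Illustrative wrapper assembled from io_allocate_scq_urings() above. */
static int io_sq_region_setup(struct io_ring_ctx *ctx,
			      struct io_uring_params *p, size_t size)
{
	struct io_uring_region_desc rd;
	int ret;

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(size);
	if (ctx->flags & IORING_SETUP_NO_MMAP) {
		/* pin caller-provided memory instead of allocating pages */
		rd.user_addr = p->sq_off.user_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}

	/* one region now backs allocation, kernel mapping and the mmap offset */
	ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
	if (ret)
		return ret;

	ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
	return 0;
}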