io_uring/kbuf: remove pbuf ring refcounting

struct io_buffer_list refcounting was needed for RCU-based sync with
mmap; now we can kill it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4a9cc54bf0077bb2bf2f3daf917549ddd41080da.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2024-11-29 13:34:37 +00:00 committed by Jens Axboe
parent bd417d5546
commit 0a2ac2b194
3 changed files with 7 additions and 18 deletions

View File

@ -48,7 +48,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
* always under the ->uring_lock, but lookups from mmap do. * always under the ->uring_lock, but lookups from mmap do.
*/ */
bl->bgid = bgid; bl->bgid = bgid;
atomic_set(&bl->refs, 1);
guard(mutex)(&ctx->mmap_lock); guard(mutex)(&ctx->mmap_lock);
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
} }
@ -385,12 +384,10 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
return i; return i;
} }
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{ {
if (atomic_dec_and_test(&bl->refs)) { __io_remove_buffers(ctx, bl, -1U);
__io_remove_buffers(ctx, bl, -1U); kfree(bl);
kfree(bl);
}
} }
void io_destroy_buffers(struct io_ring_ctx *ctx) void io_destroy_buffers(struct io_ring_ctx *ctx)
@ -804,10 +801,8 @@ struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
bl = xa_load(&ctx->io_bl_xa, bgid); bl = xa_load(&ctx->io_bl_xa, bgid);
/* must be a mmap'able buffer ring and have pages */ /* must be a mmap'able buffer ring and have pages */
if (bl && bl->flags & IOBL_MMAP) { if (bl && bl->flags & IOBL_MMAP)
if (atomic_inc_not_zero(&bl->refs)) return bl;
return bl;
}
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
@ -817,7 +812,7 @@ int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
struct io_ring_ctx *ctx = file->private_data; struct io_ring_ctx *ctx = file->private_data;
loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT; loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
struct io_buffer_list *bl; struct io_buffer_list *bl;
int bgid, ret; int bgid;
lockdep_assert_held(&ctx->mmap_lock); lockdep_assert_held(&ctx->mmap_lock);
@ -826,7 +821,5 @@ int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
if (IS_ERR(bl)) if (IS_ERR(bl))
return PTR_ERR(bl); return PTR_ERR(bl);
ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages); return io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
io_put_bl(ctx, bl);
return ret;
} }

View File

@ -35,8 +35,6 @@ struct io_buffer_list {
__u16 mask; __u16 mask;
__u16 flags; __u16 flags;
atomic_t refs;
}; };
struct io_buffer { struct io_buffer {
@ -83,7 +81,6 @@ void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx, struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
unsigned long bgid); unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma); int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

View File

@ -383,7 +383,6 @@ static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
if (IS_ERR(bl)) if (IS_ERR(bl))
return bl; return bl;
ptr = bl->buf_ring; ptr = bl->buf_ring;
io_put_bl(ctx, bl);
return ptr; return ptr;
} }
case IORING_MAP_OFF_PARAM_REGION: case IORING_MAP_OFF_PARAM_REGION: