Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-04 12:16:41 +00:00
io_uring: kbuf: kill __io_kbuf_recycle()
__io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and tweak the code so that the legacy pbuf and ring pbuf paths become clearer.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220622055551.642370-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c6dd763c24
commit 024b8fde33
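Before the diff, a self-contained sketch of the structure the patch ends up with. The types, flag values and helper bodies below are illustrative stand-ins, not the kernel definitions; only the dispatch shape follows the new io_kbuf_recycle() shown in the kbuf.h hunks further down.

/*
 * Standalone sketch of the dispatch this patch ends up with. Everything
 * here is a stand-in invented for illustration; only the control flow
 * mirrors the new io_kbuf_recycle().
 */
#include <stdio.h>

enum {
	REQ_F_BUFFER_SELECTED = 1u << 0,  /* legacy provided buffer attached */
	REQ_F_BUFFER_RING     = 1u << 1,  /* ring-mapped provided buffer attached */
};

struct req {
	unsigned int flags;
};

/* Stand-ins for io_kbuf_recycle_legacy() / io_kbuf_recycle_ring() */
static void recycle_legacy(struct req *req) { (void)req; puts("legacy pbuf path"); }
static void recycle_ring(struct req *req)   { (void)req; puts("ring pbuf path"); }

/* Shape of the new inline wrapper: one explicit check per buffer flavour,
 * instead of funnelling both through a single __io_kbuf_recycle(). */
static void recycle(struct req *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		recycle_legacy(req);
	if (req->flags & REQ_F_BUFFER_RING)
		recycle_ring(req);
}

int main(void)
{
	struct req r = { .flags = REQ_F_BUFFER_RING };

	recycle(&r);	/* prints "ring pbuf path" */
	return 0;
}

The point of the split is that each flavour of provided buffer gets its own helper, so the inline wrapper no longer has to funnel both cases through one combined __io_kbuf_recycle() body.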
io_uring/kbuf.c
@@ -37,36 +37,30 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
	return xa_load(&ctx->io_bl_xa, bgid);
}

void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			if (req->flags & REQ_F_PARTIAL_IO) {
				/*
				 * If we end up here, then the io_uring_lock has
				 * been kept held since we retrieved the buffer.
				 * For the io-wq case, we already cleared
				 * req->buf_list when the buffer was retrieved,
				 * hence it cannot be set here for that case.
				 */
				req->buf_list->head++;
				req->buf_list = NULL;
			} else {
				req->buf_index = req->buf_list->bgid;
				req->flags &= ~REQ_F_BUFFER_RING;
			}
		}
		if (req->flags & REQ_F_PARTIAL_IO)
			return;
	}

	io_ring_submit_lock(ctx, issue_flags);

@@ -77,16 +71,35 @@ void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return;
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	bl->bgid = bgid;
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, that case we should increment bl->head
	 * to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
	return;
}

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
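The comment in io_kbuf_recycle_ring() says a partial IO should increment bl->head to "monopolize the buffer". A toy userspace model of the ring's head/tail bookkeeping, using made-up types and values rather than the real struct io_uring_buf_ring, shows why bumping head has that effect:

/*
 * Toy model of a provided buffer ring's head/tail bookkeeping, illustrating
 * the comment in io_kbuf_recycle_ring(). The ring below is an invented
 * stand-in; only the head/tail behaviour is meant to match the idea.
 */
#include <stdio.h>

#define RING_ENTRIES 4	/* power of two, like a real buffer ring */

struct buf_ring {
	unsigned short head;	/* consumer (kernel) side */
	unsigned short tail;	/* producer (userspace) side */
	int bids[RING_ENTRIES];	/* buffer IDs handed in by userspace */
};

/* Peek at the next buffer without consuming it, as buffer selection does. */
static int peek_buf(struct buf_ring *br)
{
	return br->bids[br->head & (RING_ENTRIES - 1)];
}

int main(void)
{
	struct buf_ring br = { .tail = 4, .bids = { 10, 11, 12, 13 } };

	int bid = peek_buf(&br);	/* a request selects buffer 10 */

	/* Case 1: no data was transferred, so recycle by leaving head alone;
	 * the same buffer is handed out to the next request. */
	printf("no IO:      next pick %d\n", peek_buf(&br));	/* 10 again */

	/* Case 2: partial IO already wrote into buffer 10, so commit it by
	 * advancing head ("monopolize the buffer"); it cannot be picked
	 * again and handed to another request. */
	br.head++;
	printf("partial IO: next pick %d (buffer %d stays with this request)\n",
	       peek_buf(&br), bid);	/* 11; buffer 10 is monopolized */
	return 0;
}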
io_uring/kbuf.h
@@ -35,7 +35,6 @@ struct io_buffer {

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
@@ -49,6 +48,9 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_recycle_ring(struct io_kiocb *req);

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -58,18 +60,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However if that buffer is recycled the original request
@@ -78,7 +68,10 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,