io_uring/rsrc: add & apply io_req_assign_buf_node()

The following pattern is becoming more and more common:

+       io_req_assign_rsrc_node(&req->buf_node, node);
+       req->flags |= REQ_F_BUF_NODE;

so make it a helper, which is less fragile to use than the above
open-coded pair — for example, the BUF_NODE flag is even missing in the
current io_uring_cmd_prep().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241107110149.890530-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Ming Lei 2024-11-07 19:01:36 +08:00 committed by Jens Axboe
parent 4f219fcce5
commit 039c878db7
5 changed files with 11 additions and 7 deletions

View File

@ -1348,8 +1348,7 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index); node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
if (node) { if (node) {
io_req_assign_rsrc_node(&sr->notif->buf_node, node); io_req_assign_buf_node(sr->notif, node);
sr->notif->flags |= REQ_F_BUF_NODE;
ret = 0; ret = 0;
} }
io_ring_submit_unlock(ctx, issue_flags); io_ring_submit_unlock(ctx, issue_flags);

View File

@ -67,8 +67,7 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
io_ring_submit_lock(ctx, issue_flags); io_ring_submit_lock(ctx, issue_flags);
node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer); node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
if (node) { if (node) {
io_req_assign_rsrc_node(&req->buf_node, node); io_req_assign_buf_node(req, node);
req->flags |= REQ_F_BUF_NODE;
ret = 0; ret = 0;
} }
io_ring_submit_unlock(ctx, issue_flags); io_ring_submit_unlock(ctx, issue_flags);

View File

@ -111,6 +111,13 @@ static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
*dst_node = node; *dst_node = node;
} }
/*
 * Attach a buffer rsrc node to @req and mark it with REQ_F_BUF_NODE in
 * one step. Pairing the two operations in a helper avoids the fragile
 * open-coded pattern where callers could forget the flag — per the
 * commit message, it was missed in io_uring_cmd_prep().
 */
static inline void io_req_assign_buf_node(struct io_kiocb *req,
struct io_rsrc_node *node)
{
io_req_assign_rsrc_node(&req->buf_node, node);
req->flags |= REQ_F_BUF_NODE;
}
int io_files_update(struct io_kiocb *req, unsigned int issue_flags); int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

View File

@ -341,8 +341,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index); node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
if (!node) if (!node)
return -EFAULT; return -EFAULT;
io_req_assign_rsrc_node(&req->buf_node, node); io_req_assign_buf_node(req, node);
req->flags |= REQ_F_BUF_NODE;
io = req->async_data; io = req->async_data;
ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len); ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);

View File

@ -219,7 +219,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* being called. This prevents destruction of the mapped buffer * being called. This prevents destruction of the mapped buffer
* we'll need at actual import time. * we'll need at actual import time.
*/ */
io_req_assign_rsrc_node(&req->buf_node, node); io_req_assign_buf_node(req, node);
} }
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);