io_uring/rsrc: add & apply io_req_assign_buf_node()
The following pattern keeps showing up:

	io_req_assign_rsrc_node(&req->buf_node, node);
	req->flags |= REQ_F_BUF_NODE;

so turn it into a helper, which is less fragile than open-coding the two
steps; for example, the BUF_NODE flag is currently missed in
io_uring_cmd_prep().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241107110149.890530-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 039c878db7
parent 4f219fcce5
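For illustration, a converted call site looks like the sketch below; it mirrors the io_nop() and io_send_zc_import() hunks that follow, with ctx, req, node, issue_flags and ret assumed to come from the surrounding opcode handler and buf_index standing in for the per-opcode buffer index (sr->buf_index, nop->buffer, req->buf_index):

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->buf_table, buf_index);
	if (node) {
		/* one call stores node in req->buf_node and sets REQ_F_BUF_NODE */
		io_req_assign_buf_node(req, node);
		ret = 0;
	}
	io_ring_submit_unlock(ctx, issue_flags);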
io_uring/net.c
@@ -1348,8 +1348,7 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 		io_ring_submit_lock(ctx, issue_flags);
 		node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
 		if (node) {
-			io_req_assign_rsrc_node(&sr->notif->buf_node, node);
-			sr->notif->flags |= REQ_F_BUF_NODE;
+			io_req_assign_buf_node(sr->notif, node);
 			ret = 0;
 		}
 		io_ring_submit_unlock(ctx, issue_flags);
io_uring/nop.c
@@ -67,8 +67,7 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 		io_ring_submit_lock(ctx, issue_flags);
 		node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
 		if (node) {
-			io_req_assign_rsrc_node(&req->buf_node, node);
-			req->flags |= REQ_F_BUF_NODE;
+			io_req_assign_buf_node(req, node);
 			ret = 0;
 		}
 		io_ring_submit_unlock(ctx, issue_flags);
io_uring/rsrc.h
@@ -111,6 +111,13 @@ static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
 	*dst_node = node;
 }
 
+static inline void io_req_assign_buf_node(struct io_kiocb *req,
+					  struct io_rsrc_node *node)
+{
+	io_req_assign_rsrc_node(&req->buf_node, node);
+	req->flags |= REQ_F_BUF_NODE;
+}
+
 int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.c
@@ -341,8 +341,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
 	if (!node)
 		return -EFAULT;
-	io_req_assign_rsrc_node(&req->buf_node, node);
-	req->flags |= REQ_F_BUF_NODE;
+	io_req_assign_buf_node(req, node);
 
 	io = req->async_data;
 	ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
io_uring/uring_cmd.c
@@ -219,7 +219,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		 * being called. This prevents destruction of the mapped buffer
 		 * we'll need at actual import time.
 		 */
-		io_req_assign_rsrc_node(&req->buf_node, node);
+		io_req_assign_buf_node(req, node);
 	}
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);