io_uring: kill dead code in io_req_complete_post
Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
io_req_complete_post() is only called from io-wq submit work, where the
request reference is guaranteed to be held and won't drop to zero inside
io_req_complete_post(). Kill the dead code, and meanwhile add req_ref_put()
to put the reference.

Cc: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1d8297e2046553153e763a52574f0e0f4d512f86.1712331455.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 285207f67c
commit f39130004d
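Before the diff, a note on the reasoning: io-wq holds its own reference on the request for the whole duration of the work item, so a put performed inside io_req_complete_post() can never be the final one. Below is a minimal userspace sketch of that invariant using C11 atomics; fake_req, fake_ref_put() and fake_ref_put_and_test() are hypothetical stand-ins for struct io_kiocb, req_ref_put() and req_ref_put_and_test(), not the kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for struct io_kiocb; only the refcount matters here. */
struct fake_req {
	atomic_int refs;
};

/* Models req_ref_put_and_test(): drop a ref, report whether it was the last. */
static bool fake_ref_put_and_test(struct fake_req *req)
{
	return atomic_fetch_sub(&req->refs, 1) == 1;
}

/* Models the new req_ref_put(): drop a ref the caller knows is not the last. */
static void fake_ref_put(struct fake_req *req)
{
	int old = atomic_fetch_sub(&req->refs, 1);
	assert(old > 1);	/* stand-in for the kernel's WARN_ON_ONCE guards */
}

int main(void)
{
	/* one ref held by io-wq, one consumed by the completion path */
	struct fake_req req = { .refs = 2 };

	/*
	 * io-wq keeps its reference across the whole submit-work path, so
	 * the completion-side put observes a pre-decrement value of 2 and
	 * can never be the final one.
	 */
	fake_ref_put(&req);			/* completion side */
	assert(fake_ref_put_and_test(&req));	/* io-wq drops the last ref */
	return 0;
}

With the counter modeled this way, the "last reference" cleanup branch removed by this patch is unreachable from the io-wq call site, which is exactly the dead code the commit message describes.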
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -928,7 +928,6 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_rsrc_node *rsrc_node = NULL;
 
 	/*
 	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
@@ -945,42 +944,10 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 		if (!io_fill_cqe_req(ctx, req))
 			io_req_cqe_overflow(req);
 	}
-
-	/*
-	 * If we're the last reference to this request, add to our locked
-	 * free_list cache.
-	 */
-	if (req_ref_put_and_test(req)) {
-		if (req->flags & IO_REQ_LINK_FLAGS) {
-			if (req->flags & IO_DISARM_MASK)
-				io_disarm_next(req);
-			if (req->link) {
-				io_req_task_queue(req->link);
-				req->link = NULL;
-			}
-		}
-		io_put_kbuf_comp(req);
-		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
-			io_clean_op(req);
-		io_put_file(req);
-
-		rsrc_node = req->rsrc_node;
-		/*
-		 * Selected buffer deallocation in io_clean_op() assumes that
-		 * we don't hold ->completion_lock. Clean them here to avoid
-		 * deadlocks.
-		 */
-		io_put_task_remote(req->task);
-		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
-		ctx->locked_free_nr++;
-	}
 	io_cq_unlock_post(ctx);
 
-	if (rsrc_node) {
-		io_ring_submit_lock(ctx, issue_flags);
-		io_put_rsrc_node(ctx, rsrc_node);
-		io_ring_submit_unlock(ctx, issue_flags);
-	}
+	/* called from io-wq submit work only, the ref won't drop to zero */
+	req_ref_put(req);
 }
 
 void io_req_defer_failed(struct io_kiocb *req, s32 res)
diff --git a/io_uring/refs.h b/io_uring/refs.h
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -33,6 +33,13 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
+static inline void req_ref_put(struct io_kiocb *req)
+{
+	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	atomic_dec(&req->refs);
+}
+
 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
 	if (!(req->flags & REQ_F_REFCOUNT)) {
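A side note on the two WARN_ON_ONCE() guards in the new helper: the second uses req_ref_zero_or_close_to_overflow(), a check that the counter is neither zero nor so close to UINT_MAX that it has likely wrapped after an underflow. A userspace analogue of that kind of check follows; the form is an assumption for illustration, not the kernel macro verbatim.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/*
 * Analogue of a "zero or close to overflow" refcount guard: true when the
 * counter is 0, or so close to UINT_MAX that it probably wrapped around
 * after an extra put on an already-released object.
 */
static bool refs_zero_or_close_to_overflow(unsigned int refs)
{
	return refs + 127u <= 127u;	/* true for 0 and for wrapped values */
}

int main(void)
{
	assert(refs_zero_or_close_to_overflow(0u));		/* no ref held */
	assert(refs_zero_or_close_to_overflow(UINT_MAX));	/* 0 - 1 wrapped */
	assert(!refs_zero_or_close_to_overflow(1u));		/* healthy count */
	return 0;
}

Placing the wrap-detection range at the top of the unsigned space means a double put on a released request (0 - 1 == UINT_MAX) still trips the warning instead of passing silently.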