io_uring: optimise hot path of ltimeout prep
io_prep_linked_timeout() grew too heavy and the compiler now refuses to inline it. Help it by splitting the function in two and annotating the hot-path part with inline.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/560636717a32e9513724f09b9ecaace942dde4d4.1628705069.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 20e60a3832
commit fd08e5309b
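The split follows a common pattern: keep the cheap early-out check in a tiny static inline wrapper so the compiler always inlines it on the hot path, and push the rarely taken setup work into a separate out-of-line helper. A minimal, self-contained sketch of that pattern (hypothetical names, not the io_uring code itself):

/* Illustrative sketch only -- hypothetical names, not the kernel's io_uring code. */
#define likely(x) __builtin_expect(!!(x), 1)

struct item {
	struct item *link;
	unsigned int flags;
};

/* Out-of-line slow path: the heavier, rarely needed setup work. */
static struct item *__prep_slow(struct item *req)
{
	struct item *nxt = req->link;

	req->flags |= 1u;	/* stand-in for the real bookkeeping */
	return nxt;
}

/* Hot-path wrapper: small enough that the compiler inlines it, so the
 * common "nothing linked" case costs only one well-predicted branch. */
static inline struct item *prep(struct item *req)
{
	if (likely(!req->link))
		return NULL;
	return __prep_slow(req);
}

In the diff below, io_prep_linked_timeout() plays the role of the inline wrapper and __io_prep_linked_timeout() is the out-of-line slow path.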
fs/io_uring.c
@@ -1046,7 +1046,6 @@ static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
 				     struct io_uring_rsrc_update2 *up,
@@ -1299,6 +1298,31 @@ static void io_req_track_inflight(struct io_kiocb *req)
 	}
 }
 
+static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
+{
+	struct io_kiocb *nxt = req->link;
+
+	if (req->flags & REQ_F_LINK_TIMEOUT)
+		return NULL;
+
+	/* linked timeouts should have two refs once prep'ed */
+	io_req_refcount(req);
+	io_req_refcount(nxt);
+	req_ref_get(nxt);
+
+	nxt->timeout.head = req;
+	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
+	req->flags |= REQ_F_LINK_TIMEOUT;
+	return nxt;
+}
+
+static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
+{
+	if (likely(!req->link || req->link->opcode != IORING_OP_LINK_TIMEOUT))
+		return NULL;
+	return __io_prep_linked_timeout(req);
+}
+
 static void io_prep_async_work(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -6453,25 +6477,6 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 	io_put_req(req);
 }
 
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
-	struct io_kiocb *nxt = req->link;
-
-	if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
-	    nxt->opcode != IORING_OP_LINK_TIMEOUT)
-		return NULL;
-
-	/* linked timeouts should have two refs once prep'ed */
-	io_req_refcount(req);
-	io_req_refcount(nxt);
-	req_ref_get(nxt);
-
-	nxt->timeout.head = req;
-	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
-	req->flags |= REQ_F_LINK_TIMEOUT;
-	return nxt;
-}
-
 static void __io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
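For context on what this hot path does: a linked timeout is armed by a request that sets IOSQE_IO_LINK and is immediately followed by an IORING_OP_LINK_TIMEOUT SQE; for every other request the new inline check bails out after looking at req->link. A minimal userspace sketch using liburing (assumes liburing is installed and fd is a readable descriptor; illustrative only, not part of this patch):

/* Sketch: a read with a 1-second linked timeout, using liburing. */
#include <liburing.h>

int read_with_timeout(int fd, void *buf, unsigned len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return -1;

	/* The read; IOSQE_IO_LINK links it to the following SQE. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* The linked timeout: cancels the read if it takes longer than ts. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);

	/* Reap the read's completion (a second CQE reports the timeout's fate). */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}

Since most submissions never attach a linked timeout, the likely() early return in io_prep_linked_timeout() is the branch taken on the vast majority of requests, which is what makes inlining it worthwhile.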