io_uring: complete notifiers in tw
We need a task context to post CQEs but using wq is too expensive.
Try to complete notifiers using task_work and fall back to wq if that
fails.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/089799ab665b10b78fdc614ae6d59fa7ef0d5f91.1657643355.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent eb4a299b2f
commit e58d498e81
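For readers new to the pattern, the sketch below is a self-contained userspace analogue (not kernel code) of the dispatch this commit introduces: try to complete the notifier in the owning task's context first, and fall back to a generic worker only if that is not possible. All names in it (notif_done, try_run_in_task, run_in_worker, post_cqe) are invented for illustration and are not kernel APIs.

/* Userspace analogue of "complete via task_work, fall back to wq". */
#include <stdbool.h>
#include <stdio.h>

struct notif {
        bool have_task;                 /* stands in for notif->task != NULL */
        void (*complete)(struct notif *);
};

static void post_cqe(struct notif *n)
{
        (void)n;
        puts("completion posted");
}

/* stand-in for task_work_add(): only works while an owning task exists */
static bool try_run_in_task(struct notif *n)
{
        if (!n->have_task)
                return false;
        n->complete(n);
        return true;
}

/* stand-in for queue_work() on an unbound workqueue */
static void run_in_worker(struct notif *n)
{
        n->complete(n);
}

static void notif_done(struct notif *n)
{
        if (try_run_in_task(n))         /* cheap path: task context */
                return;
        run_in_worker(n);               /* expensive fallback: worker */
}

int main(void)
{
        struct notif n = { .have_task = true, .complete = post_cqe };
        notif_done(&n);
        return 0;
}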
io_uring/notif.c
@@ -13,6 +13,11 @@ static void __io_notif_complete_tw(struct callback_head *cb)
 	struct io_notif *notif = container_of(cb, struct io_notif, task_work);
 	struct io_ring_ctx *ctx = notif->ctx;
 
+	if (likely(notif->task)) {
+		io_put_task(notif->task, 1);
+		notif->task = NULL;
+	}
+
 	io_cq_lock(ctx);
 	io_fill_cqe_aux(ctx, notif->tag, 0, notif->seq, true);
 
@@ -43,6 +48,14 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
 
 	if (!refcount_dec_and_test(&uarg->refcnt))
 		return;
+
+	if (likely(notif->task)) {
+		init_task_work(&notif->task_work, __io_notif_complete_tw);
+		if (likely(!task_work_add(notif->task, &notif->task_work,
+					  TWA_SIGNAL)))
+			return;
+	}
+
 	INIT_WORK(&notif->commit_work, io_notif_complete_wq);
 	queue_work(system_unbound_wq, &notif->commit_work);
 }
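A note on the hunk above: TWA_SIGNAL makes task_work_add() notify the target task the way a signal would, so the queued completion runs promptly, and task_work_add() fails once the target task has begun exiting, which is why the pre-existing system_unbound_wq path is kept as the fallback.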
@@ -134,12 +147,15 @@ __cold int io_notif_unregister(struct io_ring_ctx *ctx)
 	for (i = 0; i < ctx->nr_notif_slots; i++) {
 		struct io_notif_slot *slot = &ctx->notif_slots[i];
 
-		if (slot->notif)
-			io_notif_slot_flush(slot);
+		if (!slot->notif)
+			continue;
+		if (WARN_ON_ONCE(slot->notif->task))
+			slot->notif->task = NULL;
+		io_notif_slot_flush(slot);
 	}
 
 	kvfree(ctx->notif_slots);
 	ctx->notif_slots = NULL;
 	ctx->nr_notif_slots = 0;
 	return 0;
 }
io_uring/notif.h
@@ -11,6 +11,9 @@ struct io_notif {
 	struct ubuf_info	uarg;
 	struct io_ring_ctx	*ctx;
 
+	/* complete via tw if ->task is non-NULL, fallback to wq otherwise */
+	struct task_struct	*task;
+
 	/* cqe->user_data, io_notif_slot::tag if not overridden */
 	u64			tag;
 	/* see struct io_notif_slot::seq */