io_uring/rsrc: kill rsrc_ref_lock
We use the ->rsrc_ref_lock spinlock to protect ->rsrc_ref_list in
io_rsrc_node_ref_zero(). Now that pcpu refcounting has been removed,
io_rsrc_node_ref_zero() is no longer executed from irq context as an
RCU callback, and it also runs under ->uring_lock.
io_rsrc_node_switch(), which queues up nodes into the list, is
protected by ->uring_lock as well, so we can safely get rid of
->rsrc_ref_lock.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6b60af883c263551190b526a55ff2c9d5ae07141.1680576071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 0a4813b1ab
parent ef8ae64ffa
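Since both the enqueue side (io_rsrc_node_switch()) and the dequeue side
(io_rsrc_node_ref_zero()) now run with ->uring_lock held, ->rsrc_ref_list
no longer needs a lock of its own. As a minimal sketch of how the new
invariant could be checked at runtime, a hypothetical helper (not part of
this commit; lockdep_assert_held() is the stock kernel primitive) might
wrap the list manipulation:

	/*
	 * Hypothetical helper, for illustration only: every access to
	 * ->rsrc_ref_list must now happen with ->uring_lock held.
	 */
	static inline void io_rsrc_list_add(struct io_ring_ctx *ctx,
					    struct io_rsrc_node *node)
	{
		lockdep_assert_held(&ctx->uring_lock);
		list_add_tail(&node->node, &ctx->rsrc_ref_list);
	}

With lockdep enabled, such an assertion fires if any caller reaches the
list without the mutex, documenting the serialization that previously
required ->rsrc_ref_lock.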
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -333,8 +333,8 @@ struct io_ring_ctx {
 	struct delayed_work		rsrc_put_work;
 	struct callback_head		rsrc_put_tw;
 	struct llist_head		rsrc_put_llist;
+	/* protected by ->uring_lock */
 	struct list_head		rsrc_ref_list;
-	spinlock_t			rsrc_ref_lock;
 
 	struct list_head		io_buffers_pages;
 	struct list_head		io_buffers_comp;
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -325,7 +325,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->defer_list);
 	INIT_LIST_HEAD(&ctx->timeout_list);
 	INIT_LIST_HEAD(&ctx->ltimeout_list);
-	spin_lock_init(&ctx->rsrc_ref_lock);
 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
 	init_task_work(&ctx->rsrc_put_tw, io_rsrc_put_tw);
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -209,11 +209,9 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
 	__must_hold(&node->rsrc_data->ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
-	unsigned long flags;
 	bool first_add = false;
 	unsigned long delay = HZ;
 
-	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
 	node->done = true;
 
 	/* if we are mid-quiesce then do not delay */
@@ -229,7 +227,6 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
 		list_del(&node->node);
 		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
 	}
-	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
 
 	if (!first_add)
 		return;
@@ -268,9 +265,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
 		rsrc_node->rsrc_data = data_to_kill;
-		spin_lock_irq(&ctx->rsrc_ref_lock);
 		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-		spin_unlock_irq(&ctx->rsrc_ref_lock);
 
 		atomic_inc(&data_to_kill->refs);
 		/* put master ref */
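A note on the design choice: the _irqsave/_irqrestore variants (and the
on-stack flags) were only needed while io_rsrc_node_ref_zero() could fire
as an RCU callback in irq context; with that gone, the mutex already held
by both paths suffices. The dequeue side documents this with a sparse
annotation (the __must_hold() visible in the first rsrc.c hunk above); a
matching annotation on the enqueue side would express the same invariant.
A sketch, assuming io_rsrc_node_switch() keeps the signature shown in the
hunk header (the annotation itself is not part of this commit):

	/*
	 * Hypothetical matching annotation (declaration only), mirroring
	 * the __must_hold() already present on io_rsrc_node_ref_zero(),
	 * so sparse can flag callers that do not hold ->uring_lock.
	 */
	void io_rsrc_node_switch(struct io_ring_ctx *ctx,
				 struct io_rsrc_data *data_to_kill)
		__must_hold(&ctx->uring_lock);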