io_uring: separate ref_list from fixed_rsrc_data
Uplevel ref_list and make it common to all resources. This is to allow
one common ref_list to be used for both files, and buffers in upcoming
patches.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5023853183
commit d67d2263fb
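Before the diff, a minimal sketch of what moves where: the per-table ref_list and its lock leave struct fixed_rsrc_data, and struct io_ring_ctx gains a single rsrc_ref_list/rsrc_ref_lock pair, so later patches can queue ref nodes for files and buffers on one common list. This is illustration only; the _before/_after struct names are invented here for comparison, and all unrelated fields are omitted.

/*
 * Illustrative sketch, not the full definitions: simplified from the
 * two structures this patch touches.
 */
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/percpu-refcount.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct fixed_rsrc_ref_node;	/* ref node type, unchanged by this patch */

/* Before: each fixed resource table owns its own ref list and lock. */
struct fixed_rsrc_data_before {
	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	struct list_head		ref_list;	/* removed by this patch */
	spinlock_t			lock;		/* removed by this patch */
};

/* After: the ring context owns one list/lock shared by all resource types. */
struct io_ring_ctx_after {
	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;	/* added by this patch */
	spinlock_t			rsrc_ref_lock;	/* added by this patch */
};

The unified diff follows.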
@@ -227,8 +227,6 @@ struct fixed_rsrc_data {
 	struct fixed_rsrc_ref_node	*node;
 	struct percpu_ref		refs;
 	struct completion		done;
-	struct list_head		ref_list;
-	spinlock_t			lock;
 };
 
 struct io_buffer {
@@ -398,6 +396,8 @@ struct io_ring_ctx {
 
 	struct delayed_work		rsrc_put_work;
 	struct llist_head		rsrc_put_llist;
+	struct list_head		rsrc_ref_list;
+	spinlock_t			rsrc_ref_lock;
 
 	struct work_struct		exit_work;
 	struct io_restriction		restrictions;
@@ -1342,6 +1342,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->timeout_list);
 	spin_lock_init(&ctx->inflight_lock);
 	INIT_LIST_HEAD(&ctx->inflight_list);
+	spin_lock_init(&ctx->rsrc_ref_lock);
+	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
 	init_llist_head(&ctx->rsrc_put_llist);
 	return ctx;
@@ -7348,13 +7350,14 @@ static void io_rsrc_ref_kill(struct percpu_ref *ref)
 	complete(&data->done);
 }
 
-static void io_sqe_rsrc_set_node(struct fixed_rsrc_data *rsrc_data,
+static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
+				 struct fixed_rsrc_data *rsrc_data,
 				 struct fixed_rsrc_ref_node *ref_node)
 {
-	spin_lock_bh(&rsrc_data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
 	rsrc_data->node = ref_node;
-	list_add_tail(&ref_node->node, &rsrc_data->ref_list);
-	spin_unlock_bh(&rsrc_data->lock);
+	list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 	percpu_ref_get(&rsrc_data->refs);
 }
 
@@ -7371,9 +7374,9 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	if (!backup_node)
 		return -ENOMEM;
 
-	spin_lock_bh(&data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
 	ref_node = data->node;
-	spin_unlock_bh(&data->lock);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 	if (ref_node)
 		percpu_ref_kill(&ref_node->refs);
 
@@ -7389,7 +7392,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 		if (ret < 0) {
 			percpu_ref_resurrect(&data->refs);
 			reinit_completion(&data->done);
-			io_sqe_rsrc_set_node(data, backup_node);
+			io_sqe_rsrc_set_node(ctx, data, backup_node);
 			return ret;
 		}
 	} while (1);
@@ -7763,11 +7766,11 @@ static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
 	data = ref_node->rsrc_data;
 	ctx = data->ctx;
 
-	spin_lock_bh(&data->lock);
+	spin_lock_bh(&ctx->rsrc_ref_lock);
 	ref_node->done = true;
 
-	while (!list_empty(&data->ref_list)) {
-		ref_node = list_first_entry(&data->ref_list,
+	while (!list_empty(&ctx->rsrc_ref_list)) {
+		ref_node = list_first_entry(&ctx->rsrc_ref_list,
 					struct fixed_rsrc_ref_node, node);
 		/* recycle ref nodes in order */
 		if (!ref_node->done)
@@ -7775,7 +7778,7 @@ static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
 		list_del(&ref_node->node);
 		first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
 	}
-	spin_unlock_bh(&data->lock);
+	spin_unlock_bh(&ctx->rsrc_ref_lock);
 
 	if (percpu_ref_is_dying(&data->refs))
 		delay = 0;
@@ -7836,8 +7839,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return -ENOMEM;
 	file_data->ctx = ctx;
 	init_completion(&file_data->done);
-	INIT_LIST_HEAD(&file_data->ref_list);
-	spin_lock_init(&file_data->lock);
 
 	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
 	file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
@@ -7898,7 +7899,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return -ENOMEM;
 	}
 
-	io_sqe_rsrc_set_node(file_data, ref_node);
+	io_sqe_rsrc_set_node(ctx, file_data, ref_node);
 	return ret;
 out_fput:
 	for (i = 0; i < ctx->nr_user_files; i++) {
@@ -8059,7 +8060,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
 	if (needs_switch) {
 		percpu_ref_kill(&data->node->refs);
-		io_sqe_rsrc_set_node(data, ref_node);
+		io_sqe_rsrc_set_node(ctx, data, ref_node);
 	} else
 		destroy_fixed_rsrc_ref_node(ref_node);
 