io-wq: have io_wq_create() take a 'data' argument
We currently pass in 4 arguments outside of the bounded size. In preparation for adding one more argument, let's bundle them up in a struct to make it more readable.

No functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 311ae9e159
commit 576a347b7a
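Before the hunks, here is a sketch of the interface this patch ends up with: the four loose create-time inputs become fields of struct io_wq_data, and io_wq_create() takes a pointer to it. The forward declarations are added here only so the fragment stands alone; the authoritative version is the fs/io-wq.h hunk below.

/* Sketch of the interface introduced by this patch; forward
 * declarations added so the fragment is self-contained. */
struct io_wq;
struct io_wq_work;
struct mm_struct;
struct user_struct;

typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

struct io_wq_data {
        struct mm_struct *mm;
        struct user_struct *user;

        get_work_fn *get_work;
        put_work_fn *put_work;
};

/* Was: io_wq_create(bounded, mm, user, get_work, put_work) */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);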
fs/io-wq.c (14 lines changed)

@@ -974,9 +974,7 @@ void io_wq_flush(struct io_wq *wq)
 	}
 }
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-			   struct user_struct *user, get_work_fn *get_work,
-			   put_work_fn *put_work)
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret = -ENOMEM, i, node;
 	struct io_wq *wq;
@@ -992,11 +990,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	wq->get_work = get_work;
-	wq->put_work = put_work;
+	wq->get_work = data->get_work;
+	wq->put_work = data->put_work;
 
 	/* caller must already hold a reference to this */
-	wq->user = user;
+	wq->user = data->user;
 
 	i = 0;
 	for_each_online_node(node) {
@@ -1009,7 +1007,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		wqe->node = node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
-		if (user) {
+		if (wq->user) {
 			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
 					task_rlimit(current, RLIMIT_NPROC);
 		}
@@ -1031,7 +1029,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		goto err;
 
 	/* caller must have already done mmgrab() on this mm */
-	wq->mm = mm;
+	wq->mm = data->mm;
 
 	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
 	if (!IS_ERR(wq->manager)) {
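One detail worth noting in the hunks above: io_wq_create() copies each field out of *data into the long-lived struct io_wq (and the later if (user) test becomes if (wq->user) for the same reason) rather than keeping the data pointer around. That matters because the caller is free to build io_wq_data as a stack local, which is exactly what the fs/io_uring.c hunk further down does. A minimal, self-contained sketch of that convention, using made-up names rather than the kernel types:

#include <stdlib.h>

/* Illustrative stand-ins, not the kernel structures. */
struct obj_data {
        void *owner;
        void (*get_work)(void *);
};

struct obj {
        void *owner;
        void (*get_work)(void *);
};

/*
 * Copy what is needed out of *data: the caller may have built it on
 * the stack, so stashing the pointer itself would leave obj pointing
 * at memory that is gone once the caller returns.
 */
struct obj *obj_create(struct obj_data *data)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return NULL;
        o->owner = data->owner;
        o->get_work = data->get_work;
        return o;
}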
fs/io-wq.h (12 lines changed)

@@ -42,9 +42,15 @@ struct io_wq_work {
 typedef void (get_work_fn)(struct io_wq_work *);
 typedef void (put_work_fn)(struct io_wq_work *);
 
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
-				struct user_struct *user,
-				get_work_fn *get_work, put_work_fn *put_work);
+struct io_wq_data {
+	struct mm_struct *mm;
+	struct user_struct *user;
+
+	get_work_fn *get_work;
+	put_work_fn *put_work;
+};
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
 void io_wq_destroy(struct io_wq *wq);
 
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
fs/io_uring.c

@@ -3962,6 +3962,7 @@ static void io_get_work(struct io_wq_work *work)
 static int io_sq_offload_start(struct io_ring_ctx *ctx,
 			       struct io_uring_params *p)
 {
+	struct io_wq_data data;
 	unsigned concurrency;
 	int ret;
 
@@ -4006,10 +4007,14 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 		goto err;
 	}
 
+	data.mm = ctx->sqo_mm;
+	data.user = ctx->user;
+	data.get_work = io_get_work;
+	data.put_work = io_put_work;
+
 	/* Do QD, or 4 * CPUS, whatever is smallest */
 	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-	ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
-					io_get_work, io_put_work);
+	ctx->io_wq = io_wq_create(concurrency, &data);
 	if (IS_ERR(ctx->io_wq)) {
 		ret = PTR_ERR(ctx->io_wq);
 		ctx->io_wq = NULL;