Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
io_uring-5.16-2021-11-09

-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmGKqiIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpkpaD/4v6Wiepi45axWwc31mwAFQJuaeYoPvxlYg
W72C12ofC4cDkkAHFRfuQLTdzCBpC83RLxI01byh0g9/ykWtxET1kv3qupUexcQx
V9uOHJhuPAKx51/XPWA190l/Ns5H9XLPqPJfkbBpJB3Q+oN2Fc9xDzci1wlGsN2C
wVvORBFneVi2GDb2ko8zgfWOSC3EERRJyorno47Zh8b6eTWzs+YuHHVRJMlsXV1Q
f8ebcz6/ug1PE2DMIWeL8WhTbvZ0wWYO1tIRCr7HdlwwCQS7h8fY/nJW2GUwSgyM
o+5kB23bPKKW5uXuz5o/jFwspESOWre7j4pMord5aUN+lsAc/HGWfOkVeXmnE0cj
9URpGXsko0i0PRAZjmYedgwRztlFnoYkAepLG8YMHu/GS/RAMcQLo805q0wzOcsW
H8KkW6seIo+pdzY0faQx05vx5+x3o0BP6ly8fTrSZcImu0p91J8TUYlXm98l4cP5
QvlnThALQlbZdrOAkmZ36V5Ay0OGB7YLPoWF6ED3suncOTwHiyGtjeXgnxi9ba56
IsmHwAxDXGiOubUsWXVL6Ti81sdCqEvURrjd56r7aAZUkjhRT/cFd2H5lqm+AZQn
2/HnyDBwEgoJC3rluDlR6HhWmgSJeoFHke7m3hXGOVgpJJgO6Uzn7Jc/XJFJOpJ9
8HgoXJktHg==
=8CHx
-----END PGP SIGNATURE-----

Merge tag 'io_uring-5.16-2021-11-09' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Minor fixes that should go into the 5.16 release:

  - Fix max worker setting not working correctly on NUMA (Beld)

  - Correctly return current setting for max workers if zeroes are
    passed in (Pavel)

  - io_queue_sqe_arm_apoll() cleanup, as identified during the initial
    merge (Pavel)

  - Misc fixes (Nghia, me)"

* tag 'io_uring-5.16-2021-11-09' of git://git.kernel.dk/linux-block:
  io_uring: honour zeroes as io-wq worker limits
  io_uring: remove dead 'sqe' store
  io_uring: remove redundant assignment to ret in io_register_iowq_max_workers()
  io-wq: fix max-workers not correctly set on multi-node system
  io_uring: clean up io_queue_sqe_arm_apoll
commit 007301c472

 fs/io-wq.c | 16 +++++++++++++---
diff --git a/fs/io-wq.c b/fs/io-wq.c
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -1308,7 +1308,9 @@ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
  */
 int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
-	int i, node, prev = 0;
+	int prev[IO_WQ_ACCT_NR];
+	bool first_node = true;
+	int i, node;
 
 	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
 	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
@@ -1319,6 +1321,9 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
 	}
 
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		prev[i] = 0;
+
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1327,14 +1332,19 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 		raw_spin_lock(&wqe->lock);
 		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
 			acct = &wqe->acct[i];
-			prev = max_t(int, acct->max_workers, prev);
+			if (first_node)
+				prev[i] = max_t(int, acct->max_workers, prev[i]);
 			if (new_count[i])
 				acct->max_workers = new_count[i];
-			new_count[i] = prev;
 		}
 		raw_spin_unlock(&wqe->lock);
+		first_node = false;
 	}
 	rcu_read_unlock();
+
+	for (i = 0; i < IO_WQ_ACCT_NR; i++)
+		new_count[i] = prev[i];
+
 	return 0;
 }
 
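Why the per-account prev[] array matters: with the old scalar prev, the
maximum accumulated across both accounting classes as well as across nodes,
and because new_count[i] was overwritten inside the node loop, later nodes
were updated with stale prev values rather than the requested limits. A
standalone sketch (ordinary userspace C with a simplified two-node,
two-account model; names are illustrative, not the kernel's) contrasting
the two behaviours:

#include <stdio.h>

#define NODES 2
#define ACCTS 2 /* 0 = bounded, 1 = unbounded */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int max_workers[NODES][ACCTS];

static void reset(void)
{
	for (int n = 0; n < NODES; n++) {
		max_workers[n][0] = 4;   /* bounded */
		max_workers[n][1] = 128; /* unbounded */
	}
}

/* Old logic: one scalar prev shared by both accounts and all nodes. */
static void set_old(int *new_count)
{
	int prev = 0;

	for (int n = 0; n < NODES; n++) {
		for (int i = 0; i < ACCTS; i++) {
			prev = MAX(max_workers[n][i], prev);
			if (new_count[i])
				max_workers[n][i] = new_count[i];
			new_count[i] = prev; /* clobbers the request for node n+1 */
		}
	}
}

/* New logic: per-account prev[], sampled from the first node only. */
static void set_new(int *new_count)
{
	int prev[ACCTS] = { 0, 0 };

	for (int n = 0; n < NODES; n++) {
		for (int i = 0; i < ACCTS; i++) {
			if (n == 0)
				prev[i] = MAX(max_workers[n][i], prev[i]);
			if (new_count[i])
				max_workers[n][i] = new_count[i];
		}
	}
	for (int i = 0; i < ACCTS; i++)
		new_count[i] = prev[i];
}

int main(void)
{
	int req[2];

	reset();
	req[0] = 8; req[1] = 8;
	set_old(req);
	/* prints 128/128: the bounded "old" value is wrong, want 4/128 */
	printf("old: bounded=%d unbounded=%d\n", req[0], req[1]);

	reset();
	req[0] = 8; req[1] = 8;
	set_new(req);
	printf("new: bounded=%d unbounded=%d\n", req[0], req[1]);
	return 0;
}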
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6950,10 +6950,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 
 	switch (io_arm_poll_handler(req)) {
 	case IO_APOLL_READY:
-		if (linked_timeout) {
-			io_queue_linked_timeout(linked_timeout);
-			linked_timeout = NULL;
-		}
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
@@ -10144,7 +10140,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 	for (i = 0; i < sq_entries; i++) {
 		unsigned int entry = i + sq_head;
 		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
-		struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx];
+		struct io_uring_sqe *sqe;
 
 		if (sq_idx > sq_mask)
 			continue;
@@ -10795,10 +10791,11 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 
 	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
 
-	memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+	for (i = 0; i < ARRAY_SIZE(new_count); i++)
+		if (new_count[i])
+			ctx->iowq_limits[i] = new_count[i];
 	ctx->iowq_limits_set = true;
 
-	ret = -EINVAL;
 	if (tctx && tctx->io_wq) {
 		ret = io_wq_max_workers(tctx->io_wq, new_count);
 		if (ret)
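On the "remove dead 'sqe' store" hunk in __io_uring_show_fdinfo(): the
pointer into ctx->sq_sqes is now formed only after sq_idx passes the bounds
check, instead of being computed up front and discarded on the continue
path. A generic sketch of that pattern (hypothetical names, not kernel
code):

#include <stdio.h>

#define N 16

static int table[N];

static int lookup(unsigned int idx)
{
	int *slot;

	/*
	 * Forming slot = &table[idx] before this check would compute an
	 * out-of-bounds address for idx >= N, and the store would be dead
	 * whenever the check rejects the index.
	 */
	if (idx >= N)
		return -1;

	slot = &table[idx]; /* formed only after validation */
	return *slot;
}

int main(void)
{
	table[3] = 42;
	printf("%d %d\n", lookup(3), lookup(99));
	return 0;
}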