io_uring-6.13-20242901

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmdJ6igQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpjj3D/44ltUzbKLiGRE8wvtyWSFdAeGUT8DA0MTW
 ot+Tr43PY6+J+v5ClUmgzJYqLRjNUxJAGUWM8Tmr7tZ2UtKwhHX/CEUtbqOEm2Sg
 e6aofpzR+sXX+ZqZRrLMPj6gLvuklWra+1STyzA6EkcvLiMqsLCY/U8nIm03VW26
 ua0kj+5477pEo9Hei4mfLtHCad94IX6UAv5xuh+90Xo9zxdWYA5sCv6SpXlG/5vy
 VYF8yChIiQC3SBgs1ewALblkm2RsCU59p0/9mOHOeBYzaFnoOV66fHEawWwKF2qM
 FLp6ZKpFEgxiRW9JpxhUw8Pv0hQx5FWN15FLLTPb/ss4Xo5uFRq8+0fDP8S5U9OT
 T37sj1nej7adaSjRWkmrgclNggFyhMmoCO9jMWxO1dmWNtHB153xGWNUcd0v/P2+
 FdjibQd79Wpq7aWbKPOQORU8rqshNusUVlge/KlvyufEne9EuOQVjGk/i2AEjU5y
 f1DomdUbEBeGB2FE7w0YYquI0oBOLQvBBk/hQl5pW7rfMgFoU0WAXiZLaJhM0i81
 RgbI5FH1rFZtsnJ3kG6HpNPcibK2seip6weNfgZZnDZCSOHiCZbuxi+WBLtupKng
 8J+ZXoDjucBVRgrUQRz6Km62oTLJQ/6CcazqrKvLxERa0eB6SNOxZRd1XYNFKacn
 xIyyyzQj1g==
 =b84h
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.13-20242901' of git://git.kernel.dk/linux

Pull more io_uring updates from Jens Axboe:

 - Remove a leftover struct from when the cqwait registered waiting was
   transitioned to regions.

 - Fix for an issue introduced in this merge window, where nop->fd might
   be used uninitialized. Ensure it's always set.

 - Add capping of the task_work run in local task_work mode, to prevent
   bursty and long chains from adding too much latency (a standalone
   sketch of the idea follows this list).

 - Work around xa_store() leaving ->head non-NULL if it encounters an
   allocation error during storing. Just a debug trigger, and can go
   away once xa_store() behaves in a more expected way for this
   condition. Not a major thing as it basically requires fault injection
   to trigger it.

 - Fix a few mapping corner cases.

 - Fix a KCSAN complaint about reading the table size after the unlock.
   Again not a "real" issue, but it's easy to silence by just keeping the
   read inside the lock that protects it.
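
As a rough illustration of the capping idea described above, here is a small
standalone C sketch (all names and types are made up for the example, and the
lock-free llist handling of the real code is omitted): work items are drained
at most max_events at a time, and whatever the cap cuts off is parked on a
retry list that the next pass services before any newly queued work.

/*
 * Sketch of capped local work draining; illustrative names only, not the
 * kernel implementation. Build with any C99 compiler.
 */
#include <stdio.h>

struct work_item {
        struct work_item *next;
        void (*fn)(struct work_item *);
};

struct work_ctx {
        struct work_item *work_list;    /* newly queued work */
        struct work_item *retry_list;   /* left over from a capped run */
};

/* Run at most max_events items from *list, advancing the head in place. */
static int run_capped(struct work_item **list, int max_events)
{
        int ran = 0;

        while (*list && ran < max_events) {
                struct work_item *item = *list;

                *list = item->next;
                item->fn(item);
                ran++;
        }
        return ran;
}

/* One pass: finish leftovers first, then take a capped bite of new work. */
static int run_local_work(struct work_ctx *ctx, int max_events)
{
        int ran = run_capped(&ctx->retry_list, max_events);

        if (ctx->retry_list)            /* cap already hit on leftovers */
                return ran;
        ran += run_capped(&ctx->work_list, max_events - ran);
        ctx->retry_list = ctx->work_list;   /* park whatever the cap cut off */
        ctx->work_list = NULL;
        return ran;
}

static void handle(struct work_item *item)
{
        (void)item;
        printf("handled one item\n");
}

int main(void)
{
        struct work_item items[3] = {
                { &items[1], handle },
                { &items[2], handle },
                { NULL,      handle },
        };
        struct work_ctx ctx = { &items[0], NULL };

        /* Cap of 2: two items run now, the third runs on the next pass. */
        printf("first pass ran %d\n", run_local_work(&ctx, 2));
        printf("second pass ran %d\n", run_local_work(&ctx, 2));
        return 0;
}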

* tag 'io_uring-6.13-20242901' of git://git.kernel.dk/linux:
  io_uring/tctx: work around xa_store() allocation error issue
  io_uring: fix corner case forgetting to vunmap
  io_uring: fix task_work cap overshooting
  io_uring: check for overflows in io_pin_pages
  io_uring/nop: ensure nop->fd is always initialized
  io_uring: limit local tw done
  io_uring: add io_local_work_pending()
  io_uring/region: return negative -E2BIG in io_create_region()
  io_uring: protect register tracing
  io_uring: remove io_uring_cqwait_reg_arg
Linus Torvalds 2024-11-30 15:43:02 -08:00
commit dd54fcced8
8 changed files with 87 additions and 47 deletions

View File

@@ -336,6 +336,7 @@ struct io_ring_ctx {
          */
         struct {
                 struct llist_head       work_llist;
+                struct llist_head       retry_llist;
                 unsigned long           check_cq;
                 atomic_t                cq_wait_nr;
                 atomic_t                cq_timeouts;

View File

@@ -873,20 +873,6 @@ enum {
         IORING_REG_WAIT_TS              = (1U << 0),
 };
 
-/*
- * Argument for IORING_REGISTER_CQWAIT_REG, registering a region of
- * struct io_uring_reg_wait that can be indexed when io_uring_enter(2) is
- * called rather than pass in a wait argument structure separately.
- */
-struct io_uring_cqwait_reg_arg {
-        __u32           flags;
-        __u32           struct_size;
-        __u32           nr_entries;
-        __u32           pad;
-        __u64           user_addr;
-        __u64           pad2[3];
-};
-
 /*
  * Argument for io_uring_enter(2) with
  * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument

View File

@@ -121,6 +121,7 @@
 #define IO_COMPL_BATCH                  32
 #define IO_REQ_ALLOC_BATCH              8
+#define IO_LOCAL_TW_DEFAULT_MAX         20
 
 struct io_defer_entry {
         struct list_head        list;
@@ -1255,12 +1256,14 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
         struct llist_node *node = llist_del_all(&ctx->work_llist);
 
         __io_fallback_tw(node, false);
+        node = llist_del_all(&ctx->retry_llist);
+        __io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
                                        int min_events)
 {
-        if (llist_empty(&ctx->work_llist))
+        if (!io_local_work_pending(ctx))
                 return false;
         if (events < min_events)
                 return true;
@@ -1269,8 +1272,29 @@ static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
         return false;
 }
 
+static int __io_run_local_work_loop(struct llist_node **node,
+                                    struct io_tw_state *ts,
+                                    int events)
+{
+        int ret = 0;
+
+        while (*node) {
+                struct llist_node *next = (*node)->next;
+                struct io_kiocb *req = container_of(*node, struct io_kiocb,
+                                                    io_task_work.node);
+                INDIRECT_CALL_2(req->io_task_work.func,
+                                io_poll_task_func, io_req_rw_complete,
+                                req, ts);
+                *node = next;
+                if (++ret >= events)
+                        break;
+        }
+
+        return ret;
+}
+
 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
-                               int min_events)
+                               int min_events, int max_events)
 {
         struct llist_node *node;
         unsigned int loops = 0;
@@ -1281,25 +1305,23 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
         if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
                 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 again:
+        min_events -= ret;
+        ret = __io_run_local_work_loop(&ctx->retry_llist.first, ts, max_events);
+        if (ctx->retry_llist.first)
+                goto retry_done;
+
         /*
          * llists are in reverse order, flip it back the right way before
          * running the pending items.
          */
         node = llist_reverse_order(llist_del_all(&ctx->work_llist));
-        while (node) {
-                struct llist_node *next = node->next;
-                struct io_kiocb *req = container_of(node, struct io_kiocb,
-                                                    io_task_work.node);
-                INDIRECT_CALL_2(req->io_task_work.func,
-                                io_poll_task_func, io_req_rw_complete,
-                                req, ts);
-                ret++;
-                node = next;
-        }
+        ret += __io_run_local_work_loop(&node, ts, max_events - ret);
+        ctx->retry_llist.first = node;
         loops++;
 
         if (io_run_local_work_continue(ctx, ret, min_events))
                 goto again;
+retry_done:
         io_submit_flush_completions(ctx);
         if (io_run_local_work_continue(ctx, ret, min_events))
                 goto again;
@@ -1313,18 +1335,20 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
 {
         struct io_tw_state ts = {};
 
-        if (llist_empty(&ctx->work_llist))
+        if (!io_local_work_pending(ctx))
                 return 0;
-        return __io_run_local_work(ctx, &ts, min_events);
+        return __io_run_local_work(ctx, &ts, min_events,
+                                   max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
 }
 
-static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
+static int io_run_local_work(struct io_ring_ctx *ctx, int min_events,
+                             int max_events)
 {
         struct io_tw_state ts = {};
         int ret;
 
         mutex_lock(&ctx->uring_lock);
-        ret = __io_run_local_work(ctx, &ts, min_events);
+        ret = __io_run_local_work(ctx, &ts, min_events, max_events);
         mutex_unlock(&ctx->uring_lock);
         return ret;
 }
@@ -2328,9 +2352,9 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 
 int io_run_task_work_sig(struct io_ring_ctx *ctx)
 {
-        if (!llist_empty(&ctx->work_llist)) {
+        if (io_local_work_pending(ctx)) {
                 __set_current_state(TASK_RUNNING);
-                if (io_run_local_work(ctx, INT_MAX) > 0)
+                if (io_run_local_work(ctx, INT_MAX, IO_LOCAL_TW_DEFAULT_MAX) > 0)
                         return 0;
         }
         if (io_run_task_work() > 0)
@@ -2459,7 +2483,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 {
         if (unlikely(READ_ONCE(ctx->check_cq)))
                 return 1;
-        if (unlikely(!llist_empty(&ctx->work_llist)))
+        if (unlikely(io_local_work_pending(ctx)))
                 return 1;
         if (unlikely(task_work_pending(current)))
                 return 1;
@@ -2493,8 +2517,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
 
         if (!io_allowed_run_tw(ctx))
                 return -EEXIST;
-        if (!llist_empty(&ctx->work_llist))
-                io_run_local_work(ctx, min_events);
+        if (io_local_work_pending(ctx))
+                io_run_local_work(ctx, min_events,
+                                  max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
         io_run_task_work();
 
         if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
@@ -2564,8 +2589,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
                  * If we got woken because of task_work being processed, run it
                  * now rather than let the caller do another wait loop.
                  */
-                if (!llist_empty(&ctx->work_llist))
-                        io_run_local_work(ctx, nr_wait);
+                if (io_local_work_pending(ctx))
+                        io_run_local_work(ctx, nr_wait, nr_wait);
                 io_run_task_work();
 
                 /*
@@ -3077,7 +3102,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
         if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
             io_allowed_defer_tw_run(ctx))
-                ret |= io_run_local_work(ctx, INT_MAX) > 0;
+                ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0;
         ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
         mutex_lock(&ctx->uring_lock);
         ret |= io_poll_remove_all(ctx, tctx, cancel_all);
@@ -3158,7 +3183,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
                 io_run_task_work();
                 io_uring_drop_tctx_refs(current);
                 xa_for_each(&tctx->xa, index, node) {
-                        if (!llist_empty(&node->ctx->work_llist)) {
+                        if (io_local_work_pending(node->ctx)) {
                                 WARN_ON_ONCE(node->ctx->submitter_task &&
                                              node->ctx->submitter_task != current);
                                 goto end_wait;

View File

@@ -347,9 +347,14 @@ static inline int io_run_task_work(void)
         return ret;
 }
 
+static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
+{
+        return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
+}
+
 static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
 {
-        return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+        return task_work_pending(current) || io_local_work_pending(ctx);
 }
 
 static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
@@ -484,6 +489,6 @@ enum {
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
         return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
-               !llist_empty(&ctx->work_llist);
+               io_local_work_pending(ctx);
 }
 #endif

View File

@@ -73,6 +73,8 @@ void *io_pages_map(struct page ***out_pages, unsigned short *npages,
         ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
         if (!IS_ERR(ret))
                 goto done;
+        if (nr_pages == 1)
+                goto fail;
 
         ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
         if (!IS_ERR(ret)) {
@@ -81,7 +83,7 @@ void *io_pages_map(struct page ***out_pages, unsigned short *npages,
                 *npages = nr_pages;
                 return ret;
         }
-
+fail:
         kvfree(pages);
         *out_pages = NULL;
         *npages = 0;
@@ -136,7 +138,12 @@ struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
         struct page **pages;
         int ret;
 
-        end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        if (check_add_overflow(uaddr, len, &end))
+                return ERR_PTR(-EOVERFLOW);
+        if (check_add_overflow(end, PAGE_SIZE - 1, &end))
+                return ERR_PTR(-EOVERFLOW);
+
+        end = end >> PAGE_SHIFT;
         start = uaddr >> PAGE_SHIFT;
         nr_pages = end - start;
         if (WARN_ON_ONCE(!nr_pages))
@@ -229,7 +236,7 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
         if (!reg->size || reg->mmap_offset || reg->id)
                 return -EINVAL;
         if ((reg->size >> PAGE_SHIFT) > INT_MAX)
-                return E2BIG;
+                return -E2BIG;
         if ((reg->user_addr | reg->size) & ~PAGE_MASK)
                 return -EINVAL;
         if (check_add_overflow(reg->user_addr, reg->size, &end))
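
To make the motivation for the checked arithmetic in the hunks above concrete,
here is a small user-space sketch (it assumes the GCC/Clang
__builtin_add_overflow builtin in place of the kernel's check_add_overflow(),
and all names are made up): near the top of the address space, the unchecked
"(uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT" wraps around and yields a bogus
page count, while the checked version rejects the range.

/* User-space sketch of an overflow-safe page-range computation. */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

/* Returns false on overflow, otherwise stores the number of pages spanned. */
static bool pages_spanned(unsigned long uaddr, unsigned long len,
                          unsigned long *nr_pages)
{
        unsigned long start = uaddr >> SKETCH_PAGE_SHIFT;
        unsigned long end;

        if (__builtin_add_overflow(uaddr, len, &end))
                return false;
        if (__builtin_add_overflow(end, SKETCH_PAGE_SIZE - 1, &end))
                return false;
        end >>= SKETCH_PAGE_SHIFT;

        *nr_pages = end - start;
        return true;
}

int main(void)
{
        unsigned long nr;

        if (pages_spanned(0x1000, 3 * SKETCH_PAGE_SIZE + 1, &nr))
                printf("normal case spans %lu pages\n", nr);    /* 4 pages */

        /* Near the top of the address space the unchecked math would wrap. */
        if (!pages_spanned(~0UL - SKETCH_PAGE_SIZE, 2 * SKETCH_PAGE_SIZE, &nr))
                printf("overflow detected, range rejected\n");
        return 0;
}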

View File

@@ -35,10 +35,14 @@ int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                 nop->result = READ_ONCE(sqe->len);
         else
                 nop->result = 0;
-        if (nop->flags & IORING_NOP_FIXED_FILE)
+        if (nop->flags & IORING_NOP_FILE)
                 nop->fd = READ_ONCE(sqe->fd);
+        else
+                nop->fd = -1;
         if (nop->flags & IORING_NOP_FIXED_BUFFER)
                 nop->buffer = READ_ONCE(sqe->buf_index);
+        else
+                nop->buffer = -1;
         return 0;
 }

View File

@@ -905,9 +905,10 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 
         mutex_lock(&ctx->uring_lock);
         ret = __io_uring_register(ctx, opcode, arg, nr_args);
-        mutex_unlock(&ctx->uring_lock);
+
         trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
                                 ctx->buf_table.nr, ret);
+        mutex_unlock(&ctx->uring_lock);
         if (!use_registered_ring)
                 fput(file);
         return ret;
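
The hunk above moves the register tracepoint back under uring_lock so that the
table sizes it logs are read while the lock protecting them is still held. A
tiny pthread-based sketch of the same pattern (made-up names, compile with
-pthread), purely for illustration:

/* Log lock-protected counters without a racy read after unlock. */
#include <pthread.h>
#include <stdio.h>

struct table_ctx {
        pthread_mutex_t lock;
        unsigned int nr_files;  /* protected by lock */
        unsigned int nr_bufs;   /* protected by lock */
};

static void register_and_trace(struct table_ctx *ctx, unsigned int files,
                               unsigned int bufs)
{
        pthread_mutex_lock(&ctx->lock);
        ctx->nr_files += files;
        ctx->nr_bufs += bufs;
        /*
         * Read the counters for the log line while still holding the lock
         * that protects them; reading them after the unlock is exactly the
         * kind of racy access a tool like KCSAN flags.
         */
        printf("registered: files=%u bufs=%u\n", ctx->nr_files, ctx->nr_bufs);
        pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
        struct table_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        register_and_trace(&ctx, 4, 2);
        return 0;
}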

View File

@@ -47,8 +47,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
 void __io_uring_free(struct task_struct *tsk)
 {
         struct io_uring_task *tctx = tsk->io_uring;
+        struct io_tctx_node *node;
+        unsigned long index;
 
-        WARN_ON_ONCE(!xa_empty(&tctx->xa));
+        /*
+         * Fault injection forcing allocation errors in the xa_store() path
+         * can lead to xa_empty() returning false, even though no actual
+         * node is stored in the xarray. Until that gets sorted out, attempt
+         * an iteration here and warn if any entries are found.
+         */
+        xa_for_each(&tctx->xa, index, node) {
+                WARN_ON_ONCE(1);
+                break;
+        }
         WARN_ON_ONCE(tctx->io_wq);
         WARN_ON_ONCE(tctx->cached_refs);