#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_OK = 0,
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * Requeue the task_work to restart operations on this request. The
         * actual value isn't important, should just be not an otherwise
         * valid error code, yet less than -MAX_ERRNO and valid internally.
         */
        IOU_REQUEUE = -3072,

        /*
         * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
         * the poll runner that multishot should be removed and the result
         * set on req->cqe.res.
         */
        IOU_STOP_MULTISHOT = -ECANCELED,
};

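/*
 * Per-waiter state for blocking on CQ completions. cq_tail/cq_min_tail and
 * min_timeout/timeout implement the minimum batch wait: once the minimum
 * wait time expires, waiting ends if any completions have arrived at all;
 * otherwise the regular timeout (and wake-up event count) still applies.
 */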
struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned cq_min_tail;
        unsigned nr_timeouts;
        int hit_timeout;
        ktime_t min_timeout;
        ktime_t timeout;
        struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ktime_t napi_busy_poll_dt;
        bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
        struct io_ring_ctx *ctx = iowq->ctx;
        int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
        return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
                         unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
                                 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
                                struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
                                int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                        bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

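/*
 * Assert that the CQ posting side is serialized as expected for this ring
 * configuration: via ->uring_lock for IOPOLL rings, via ->completion_lock
 * when completions aren't single-task, and otherwise by running in the
 * submitter task (or in fallback work once the ring is going away).
 * Compiled out unless CONFIG_PROVE_LOCKING is enabled.
 */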
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
        lockdep_assert(in_task());

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                lockdep_assert_held(&ctx->uring_lock);
        } else if (!ctx->task_complete) {
                lockdep_assert_held(&ctx->completion_lock);
        } else if (ctx->submitter_task) {
                /*
                 * ->submitter_task may be NULL and we can still post a CQE,
                 * if the ring has been setup with IORING_SETUP_R_DISABLED.
                 * Not from an SQE, as those cannot be submitted, but via
                 * updating tagged resources.
                 */
                if (percpu_ref_is_dying(&ctx->refs))
                        lockdep_assert(current_work());
                else
                        lockdep_assert(current == ctx->submitter_task);
        }
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
            ctx->submit_state.cq_flush)
                __io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)

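/*
 * Hand out the next free CQE from the cached range of the CQ ring, refilling
 * the cache via io_cqe_cache_refill() (which may fall back to overflow
 * handling) once the cached range is exhausted. A CQE32 ring consumes two
 * slots per completion.
 */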
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                       struct io_uring_cqe **ret,
                                       bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
                if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
                        return false;
        }
        *ret = ctx->cqe_cached;
        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
                ctx->cqe_cached++;
        return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
        return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }

        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, cqe);
        return true;
}

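/*
 * Mark the request as failed. A failed request always posts a CQE, so drop
 * REQ_F_CQE_SKIP if it was set and transfer the skip to the request's link
 * chain via REQ_F_SKIP_LINK_CQES.
 */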
static inline void req_set_fail(struct io_kiocb *req)
{
        req->flags |= REQ_F_FAIL;
        if (req->flags & REQ_F_CQE_SKIP) {
                req->flags &= ~REQ_F_CQE_SKIP;
                req->flags |= REQ_F_SKIP_LINK_CQES;
        }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
        req->cqe.res = res;
        req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
        return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
                fput(req->file);
}

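/*
 * Conditional ->uring_lock helpers. A typical opcode handler brackets access
 * to ctx state with:
 *
 *        io_ring_submit_lock(ctx, issue_flags);
 *        ...
 *        io_ring_submit_unlock(ctx, issue_flags);
 *
 * The mutex is only taken/dropped when issuing unlocked (IO_URING_F_UNLOCKED,
 * e.g. from an io-wq worker); otherwise the caller already holds it.
 */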
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. Same is true for the SQPOLL offload.
         * The only exception is when we've detached the request and issue it
         * from an async worker thread, grab the lock for that case.
         */
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
        /* order cqe stores with ring update */
        smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

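/*
 * Wake tasks polling the ring fd itself. The EPOLL_URING_WAKE key flags the
 * wakeup as originating from io_uring, see the recursion note in
 * io_cqring_wake() below.
 */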
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
        if (wq_has_sleeper(&ctx->poll_wq))
                __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
        /*
         * Trigger waitqueue handler on all waiters on our waitqueue. This
         * won't necessarily wake up all the tasks, io_should_wake() will make
         * that decision.
         *
         * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
         * set in the mask so that if we recurse back into our own poll
         * waitqueue handlers, we know we have a dependency between eventfd or
         * epoll and should terminate multishot poll at that point.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
                __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
        struct io_rings *r = ctx->rings;

        /*
         * SQPOLL must use the actual sqring head, as using the cached_sq_head
         * is race prone if the SQPOLL thread has grabbed entries but not yet
         * committed them to the ring. For !SQPOLL, this doesn't matter, but
         * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
         * just read the actual sqring head unconditionally.
         */
        return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned int entries;

        /* make sure SQ entry isn't read before tail */
        entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
        return min(entries, ctx->sq_entries);
}

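/*
 * Run any task_work pending for the current task, including the notify and
 * resume work an io-wq worker would otherwise never get to process. Returns
 * true if any work was run.
 */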
static inline int io_run_task_work(void)
{
        bool ret = false;

        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        /*
         * PF_IO_WORKER never returns to userspace, so check here if we have
         * notify work that needs processing.
         */
        if (current->flags & PF_IO_WORKER) {
                if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                        __set_current_state(TASK_RUNNING);
                        resume_user_mode_work(NULL);
                }
                if (current->io_uring) {
                        unsigned int count = 0;

                        __set_current_state(TASK_RUNNING);
                        tctx_task_work_run(current->io_uring, UINT_MAX, &count);
                        if (count)
                                ret = true;
                }
        }
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                ret = true;
        }

        return ret;
}

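/* Deferred (IORING_SETUP_DEFER_TASKRUN) work queued on this ring? */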
static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
        return !llist_empty(&ctx->work_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
        lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
        __must_hold(&req->ctx->uring_lock)
{
        struct io_submit_state *state = &req->ctx->submit_state;

        lockdep_assert_held(&req->ctx->uring_lock);

        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
                     ctx->has_evfd || ctx->poll_activated))
                __io_commit_cqring_flush(ctx);
}

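/*
 * Consume nr references from the per-task cache, refilling it from the slow
 * path (io_task_refs_refill()) when it runs dry.
 */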
static inline void io_get_task_refs(int nr)
{
        struct io_uring_task *tctx = current->io_uring;

        tctx->cached_refs -= nr;
        if (unlikely(tctx->cached_refs < 0))
                io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
        return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

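/*
 * Pop a request off ctx->submit_state.free_list; callers must know the cache
 * is non-empty. io_alloc_req() below refills the cache first when needed.
 */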
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
        wq_stack_extract(&ctx->submit_state.free_list);
        return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
        if (unlikely(io_req_cache_empty(ctx))) {
                if (!__io_alloc_req_refill(ctx))
                        return false;
        }
        *req = io_extract_req(ctx);
        return true;
}

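/*
 * For IORING_SETUP_DEFER_TASKRUN rings, deferred task_work may only be run
 * from ctx->submitter_task; other rings allow any task.
 */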
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
        return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
        return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
        if (ctx->flags & IORING_SETUP_SQE128)
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
}

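/*
 * Cache whether the request's file supports polling in REQ_F_CAN_POLL, so
 * file_can_poll() is only evaluated once per request.
 */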
static inline bool io_file_can_poll(struct io_kiocb *req)
{
        if (req->flags & REQ_F_CAN_POLL)
                return true;
        if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
        return false;
}

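/*
 * Read the current time with the clock source the ring was configured with:
 * CLOCK_MONOTONIC directly, otherwise via the registered clock offset.
 */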
static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
        if (ctx->clockid == CLOCK_MONOTONIC)
                return ktime_get();

        return ktime_get_with_offset(ctx->clock_offset);
}

enum {
        IO_CHECK_CQ_OVERFLOW_BIT,
        IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
        return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
               io_local_work_pending(ctx);
}
#endif