mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-28 00:33:16 +00:00
for-6.13/io_uring-20241118
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmc7S3kQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjHVEAC+CITBEcGy+S0IK0BpIAhuA+A621LtqBwy
0z/4MZKXMqvWxcFGQJ9Zr8MvxUnY4KFcssiaR5zk+I9TczNu7mLMuPYD1Gb0Klgz
mwuFOylo1CAAC41IABYZZ/0qWbTaW0p8tpaGsTbTNk3tBxuMLB550+APAqC1OE9U
bb7rP+FHc5+YGI9/7JNWt7NNTSHvVSO6oxjltCxHr1dRg93Jtr2jaY6letY3epFz
TCFyfJlDtK8fPwtYRyG51M4g2Vdp9/4qsfPqvnXwUr9MdWaVh5/TFkyvqDi5sCKM
zdK/sjRiimYzvqqKg6bzgYscITUPNk2TG6ZJq5U1L7lrglzVY69c7GIUnNzPrL/y
AxQsR5Guxz3bRNYWZ4BKJDH+NNB+cgIFEXDsv72qoUy3HTzA6wOPZYxfjhZhKuG/
DjRwM7NGx5oPiKtpK99IulZttXdmtkH0csuLwKmOzrQskQdTuWyrEtU7UQql7oQ5
Rt3DhMXouzYZMicB8U5Q9gO2I3WN+2VVxXl4sa00LG8KsT6PzLnz4Q2k/1c83S6J
rRivRbZAbZ1+BqKvF8T7GgzLCeaLgzbeoxmxj6xr87pf3SYEs2KhQeQ+n/C0HTOt
GOcG1+bvh7t2aSvlBPKVCExWI4erwG6wXFhfGKsLW9CmwIMqRNxdePpRWe3Cueyp
M3QRJuvTxQ==
=bDvp
-----END PGP SIGNATURE-----

Merge tag 'for-6.13/io_uring-20241118' of git://git.kernel.dk/linux

Pull io_uring updates from Jens Axboe:

- Cleanups of the eventfd handling code, making it fully private.

- Support for sending a sync message to another ring, without having a
  ring available to send a normal async message.

- Get rid of the separate unlocked hash table, unify everything around
  the single locked one.

- Add support for ring resizing. It can be hard to appropriately size
  the CQ ring upfront, if the application doesn't know how busy it will
  be. This results in applications sizing rings for the most busy case,
  which can be wasteful. With ring resizing, they can start small and
  grow the ring, if needed.

- Add support for fixed wait regions, rather than needing to copy the
  same wait data tons of times for each wait operation.

- Rewrite the resource node handling, which before was serialized per
  ring. This caused issues with particularly fixed files, where one
  file waiting on IO could hold up putting and freeing of other
  unrelated files. Now each node is handled separately. New code is
  much simpler too, and was a net 250 line reduction in code.

- Add support for just doing partial buffer clones, rather than always
  cloning the entire buffer table.

- Series adding static NAPI support, where a specific NAPI instance is
  used rather than having a list of them available that need lookup.

- Add support for mapped regions, and also convert the fixed wait
  support mentioned above to that concept. This avoids doing special
  mappings for various planned features, and folds the existing
  registered wait into that too.

- Add support for hybrid IO polling, which is a variant of strict
  IOPOLL but with an initial sleep delay to avoid spinning too early
  and wasting resources on devices that aren't necessarily in the
  < 5 usec category wrt latencies.

- Various cleanups and little fixes.
* tag 'for-6.13/io_uring-20241118' of git://git.kernel.dk/linux: (79 commits)
  io_uring/region: fix error codes after failed vmap
  io_uring: restore back registered wait arguments
  io_uring: add memory region registration
  io_uring: introduce concept of memory regions
  io_uring: temporarily disable registered waits
  io_uring: disable ENTER_EXT_ARG_REG for IOPOLL
  io_uring: fortify io_pin_pages with a warning
  switch io_msg_ring() to CLASS(fd)
  io_uring: fix invalid hybrid polling ctx leaks
  io_uring/uring_cmd: fix buffer index retrieval
  io_uring/rsrc: add & apply io_req_assign_buf_node()
  io_uring/rsrc: remove '->ctx_ptr' of 'struct io_rsrc_node'
  io_uring/rsrc: pass 'struct io_ring_ctx' reference to rsrc helpers
  io_uring: avoid normal tw intermediate fallback
  io_uring/napi: add static napi tracking strategy
  io_uring/napi: clean up __io_napi_do_busy_loop
  io_uring/napi: Use lock guards
  io_uring/napi: improve __io_napi_add
  io_uring/napi: fix io_napi_entry RCU accesses
  io_uring/napi: protect concurrent io_napi_entry timeout accesses
  ...
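To make the ring resizing bullet above concrete, here is a rough userspace sketch of growing an existing ring with the new IORING_REGISTER_RESIZE_RINGS opcode that appears in the uapi portion of the diff below. It is an assumption-laden illustration rather than a reference: the use of struct io_uring_params as the register argument and IORING_SETUP_CQSIZE to carry the requested CQ size are inferred from the setup interface, not spelled out in this pull message.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: grow the rings of an already-created io_uring fd. */
static int grow_rings(int ring_fd, unsigned sq_entries, unsigned cq_entries)
{
	struct io_uring_params p;	/* assumed argument type, see note above */

	memset(&p, 0, sizeof(p));
	p.sq_entries = sq_entries;
	p.cq_entries = cq_entries;
	p.flags = IORING_SETUP_CQSIZE;	/* assumed: honour the explicit CQ size */

	/* Returns 0 on success, -1 with errno set otherwise (e.g. EINVAL). */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_RESIZE_RINGS, &p, 1);
}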
This commit is contained in:
commit 8350142a4b
@@ -110,7 +110,7 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
return cmd_to_io_kiocb(cmd)->task;
return cmd_to_io_kiocb(cmd)->tctx->task;
}

#endif /* _LINUX_IO_URING_CMD_H */
@@ -56,19 +56,18 @@ struct io_wq_work {
int cancel_seq;
};

struct io_fixed_file {
/* file * with additional FFS_* flags */
unsigned long file_ptr;
struct io_rsrc_data {
unsigned int nr;
struct io_rsrc_node **nodes;
};

struct io_file_table {
struct io_fixed_file *files;
struct io_rsrc_data data;
unsigned long *bitmap;
unsigned int alloc_hint;
};

struct io_hash_bucket {
spinlock_t lock;
struct hlist_head list;
} ____cacheline_aligned_in_smp;

@@ -77,6 +76,12 @@ struct io_hash_table {
unsigned hash_bits;
};

struct io_mapped_region {
struct page **pages;
void *vmap_ptr;
size_t nr_pages;
};

/*
* Arbitrary limit, can be raised if need be
*/
@@ -86,6 +91,7 @@ struct io_uring_task {
/* submission side */
int cached_refs;
const struct io_ring_ctx *last;
struct task_struct *task;
struct io_wq *io_wq;
struct file *registered_rings[IO_RINGFD_REG_MAX];

@@ -271,7 +277,6 @@ struct io_ring_ctx {
* Fixed resources fast path, should be accessed only under
* uring_lock, and updated through io_uring_register(2)
*/
struct io_rsrc_node *rsrc_node;
atomic_t cancel_seq;

/*
@@ -284,15 +289,13 @@ struct io_ring_ctx {
struct io_wq_work_list iopoll_list;

struct io_file_table file_table;
struct io_mapped_ubuf **user_bufs;
unsigned nr_user_files;
unsigned nr_user_bufs;
struct io_rsrc_data buf_table;

struct io_submit_state submit_state;

struct xarray io_bl_xa;

struct io_hash_table cancel_table_locked;
struct io_hash_table cancel_table;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_alloc_cache rw_cache;
@@ -303,6 +306,11 @@ struct io_ring_ctx {
* ->uring_cmd() by io_uring_cmd_insert_cancelable()
*/
struct hlist_head cancelable_uring_cmd;
/*
* For Hybrid IOPOLL, runtime in hybrid polling, without
* scheduling time
*/
u64 hybrid_poll_time;
} ____cacheline_aligned_in_smp;

struct {
@@ -317,6 +325,9 @@ struct io_ring_ctx {
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
unsigned cq_extra;

void *cq_wait_arg;
size_t cq_wait_size;
} ____cacheline_aligned_in_smp;

/*
@@ -343,7 +354,6 @@ struct io_ring_ctx {

struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
struct io_hash_table cancel_table;

struct hlist_head waitid_list;

@@ -367,16 +377,6 @@ struct io_ring_ctx {
struct wait_queue_head poll_wq;
struct io_restriction restrictions;

/* slow path rsrc auxilary data, used by update/register */
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;

/* protected by ->uring_lock */
struct list_head rsrc_ref_list;
struct io_alloc_cache rsrc_node_cache;
struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce;

u32 pers_next;
struct xarray personalities;

@@ -410,7 +410,7 @@ struct io_ring_ctx {
/* napi busy poll default timeout */
ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
bool napi_enabled;
u8 napi_track_mode;

DECLARE_HASHTABLE(napi_ht, 4);
#endif
@@ -418,6 +418,13 @@ struct io_ring_ctx {
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;

/*
* Protection for resize vs mmap races - both the mmap and resize
* side will need to grab this lock, to prevent either side from
* being run concurrently with the other.
*/
struct mutex resize_lock;

/*
* If IORING_SETUP_NO_MMAP is used, then the below holds
* the gup'ed pages for the two rings, and the sqes.
@@ -426,6 +433,9 @@ struct io_ring_ctx {
unsigned short n_sqe_pages;
struct page **ring_pages;
struct page **sqe_pages;

/* used for optimised request parameter and wait argument passing */
struct io_mapped_region param_region;
};

struct io_tw_state {
@@ -448,6 +458,7 @@ enum {
REQ_F_LINK_TIMEOUT_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_POLLED_BIT,
REQ_F_HYBRID_IOPOLL_STATE_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_BUFFER_RING_BIT,
REQ_F_REISSUE_BIT,
@@ -460,7 +471,6 @@ enum {
REQ_F_DOUBLE_POLL_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
REQ_F_HASH_LOCKED_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
@@ -469,6 +479,7 @@ enum {
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
REQ_F_BUF_NODE_BIT,

/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -507,6 +518,8 @@ enum {
REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
/* already went through poll handler */
REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
/* every req only blocks once in hybrid poll */
REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
/* buffer already selected */
REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
/* buffer selected from ring, needs commit */
@@ -535,8 +548,6 @@ enum {
REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
/* don't use lazy poll wake for this request */
REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
/* file is pollable */
@@ -547,6 +558,8 @@ enum {
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
/* buffer ring head needs incrementing on put */
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
/* buf node is valid */
REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
@@ -616,12 +629,9 @@ struct io_kiocb {
struct io_cqe cqe;

struct io_ring_ctx *ctx;
struct task_struct *task;
struct io_uring_task *tctx;

union {
/* store used ubuf, so we can prevent reloading */
struct io_mapped_ubuf *imu;

/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;

@@ -630,6 +640,8 @@ struct io_kiocb {
* REQ_F_BUFFER_RING is set.
*/
struct io_buffer_list *buf_list;

struct io_rsrc_node *buf_node;
};

union {
@@ -639,13 +651,20 @@ struct io_kiocb {
__poll_t apoll_events;
};

struct io_rsrc_node *rsrc_node;
struct io_rsrc_node *file_node;

atomic_t refs;
bool cancel_seq_set;
struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
union {
/*
* for polled requests, i.e. IORING_OP_POLL_ADD and async armed
* poll
*/
struct hlist_node hash_node;
/* For IOPOLL setup queues, with hybrid polling */
u64 iopoll_start;
};
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
@@ -668,4 +687,9 @@ struct io_overflow_cqe {
struct io_uring_cqe cqe;
};

static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
{
return ctx->flags & IORING_SETUP_CQE32;
}

#endif
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
* @req: pointer to a submitted request
* @user_data: user data associated with the request
* @res: result of the request
* @cflags: completion flags
* @extra1: extra 64-bit data for CQE32
* @extra2: extra 64-bit data for CQE32
*
* @req: (optional) pointer to a submitted request
* @cqe: pointer to the filled in CQE being posted
*/
TRACE_EVENT(io_uring_complete,

TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
u64 extra1, u64 extra2),
TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),

TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
TP_ARGS(ctx, req, cqe),

TP_STRUCT__entry (
__field( void *, ctx )
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
__entry->user_data = user_data;
__entry->res = res;
__entry->cflags = cflags;
__entry->extra1 = extra1;
__entry->extra2 = extra2;
__entry->user_data = cqe->user_data;
__entry->res = cqe->res;
__entry->cflags = cqe->flags;
__entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
__entry->extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
),

TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
@@ -200,6 +200,9 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_NO_SQARRAY (1U << 16)

/* Use hybrid poll in iopoll process */
#define IORING_SETUP_HYBRID_IOPOLL (1U << 17)

enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
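For context on the flag just added above: hybrid polling is a variant of IOPOLL, so a ring has to request both flags at setup time. A minimal, hedged sketch using the raw syscall (the entries value and error handling are purely illustrative):

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: create a ring that opts into hybrid IOPOLL (assumes a 6.13+ kernel). */
static int setup_hybrid_iopoll_ring(unsigned entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	/* Hybrid polling is a variant of IOPOLL, so both flags are requested. */
	p.flags = IORING_SETUP_IOPOLL | IORING_SETUP_HYBRID_IOPOLL;

	/* Returns the ring fd on success, -1 with errno set on older kernels. */
	return syscall(__NR_io_uring_setup, entries, &p);
}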
@@ -416,6 +419,9 @@ enum io_uring_msg_ring_flags {
* IORING_NOP_INJECT_RESULT Inject result from sqe->result
*/
#define IORING_NOP_INJECT_RESULT (1U << 0)
#define IORING_NOP_FILE (1U << 1)
#define IORING_NOP_FIXED_FILE (1U << 2)
#define IORING_NOP_FIXED_BUFFER (1U << 3)

/*
* IO completion data structure (Completion Queue Entry)
@@ -518,6 +524,7 @@ struct io_cqring_offsets {
#define IORING_ENTER_EXT_ARG (1U << 3)
#define IORING_ENTER_REGISTERED_RING (1U << 4)
#define IORING_ENTER_ABS_TIMER (1U << 5)
#define IORING_ENTER_EXT_ARG_REG (1U << 6)

/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -612,6 +619,16 @@ enum io_uring_register_op {
/* clone registered buffers from source ring to current ring */
IORING_REGISTER_CLONE_BUFFERS = 30,

/* send MSG_RING without having a ring */
IORING_REGISTER_SEND_MSG_RING = 31,

/* 32 reserved for zc rx */

/* resize CQ ring */
IORING_REGISTER_RESIZE_RINGS = 33,

IORING_REGISTER_MEM_REGION = 34,

/* this goes last */
IORING_REGISTER_LAST,

@@ -632,6 +649,31 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};

enum {
/* initialise with user provided memory pointed by user_addr */
IORING_MEM_REGION_TYPE_USER = 1,
};

struct io_uring_region_desc {
__u64 user_addr;
__u64 size;
__u32 flags;
__u32 id;
__u64 mmap_offset;
__u64 __resv[4];
};

enum {
/* expose the region as registered wait arguments */
IORING_MEM_REGION_REG_WAIT_ARG = 1,
};

struct io_uring_mem_region_reg {
__u64 region_uptr; /* struct io_uring_region_desc * */
__u64 flags;
__u64 __resv[2];
};
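The two structures above are what userspace hands to IORING_REGISTER_MEM_REGION. A hedged sketch of registering a user-provided region and exposing it as registered wait arguments follows; the alignment requirements and kernel-side validation rules are not shown in this diff, so treat the details as assumptions:

#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: register 'mem' (page-aligned, 'size' bytes) as a wait-argument region. */
static int register_wait_region(int ring_fd, void *mem, size_t size)
{
	struct io_uring_region_desc rd;
	struct io_uring_mem_region_reg mr;

	memset(&rd, 0, sizeof(rd));
	rd.user_addr = (uint64_t)(uintptr_t)mem;
	rd.size = size;
	rd.flags = IORING_MEM_REGION_TYPE_USER;		/* memory is supplied by userspace */

	memset(&mr, 0, sizeof(mr));
	mr.region_uptr = (uint64_t)(uintptr_t)&rd;
	mr.flags = IORING_MEM_REGION_REG_WAIT_ARG;	/* expose it as registered wait args */

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_MEM_REGION, &mr, 1);
}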
/*
* Register a fully sparse file space, rather than pass in an array of all
* -1 file descriptors.
@@ -698,13 +740,17 @@ struct io_uring_clock_register {
};

enum {
IORING_REGISTER_SRC_REGISTERED = 1,
IORING_REGISTER_SRC_REGISTERED = (1U << 0),
IORING_REGISTER_DST_REPLACE = (1U << 1),
};

struct io_uring_clone_buffers {
__u32 src_fd;
__u32 flags;
__u32 pad[6];
__u32 src_off;
__u32 dst_off;
__u32 nr;
__u32 pad[3];
};
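With the new src_off/dst_off/nr fields, a clone no longer has to copy the entire buffer table. A sketch of cloning a sub-range of registered buffers from one ring into another; the offsets and count are made up for illustration, and flag handling is deliberately minimal:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: copy a sub-range of the source ring's buffer table into this ring. */
static int clone_buffer_range(int dst_ring_fd, int src_ring_fd)
{
	struct io_uring_clone_buffers buf;

	memset(&buf, 0, sizeof(buf));
	buf.src_fd = src_ring_fd;	/* plain ring fd; flags left at 0 in this sketch */
	buf.src_off = 8;		/* first source slot to clone (illustrative) */
	buf.dst_off = 0;		/* destination slot to start filling */
	buf.nr = 16;			/* number of buffers to clone */
	/* IORING_REGISTER_DST_REPLACE could be set to overwrite existing dst slots. */

	return syscall(__NR_io_uring_register, dst_ring_fd,
		       IORING_REGISTER_CLONE_BUFFERS, &buf, 1);
}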
struct io_uring_buf {
@@ -768,12 +814,40 @@ struct io_uring_buf_status {
__u32 resv[8];
};

enum io_uring_napi_op {
/* register/ungister backward compatible opcode */
IO_URING_NAPI_REGISTER_OP = 0,

/* opcodes to update napi_list when static tracking is used */
IO_URING_NAPI_STATIC_ADD_ID = 1,
IO_URING_NAPI_STATIC_DEL_ID = 2
};

enum io_uring_napi_tracking_strategy {
/* value must be 0 for backward compatibility */
IO_URING_NAPI_TRACKING_DYNAMIC = 0,
IO_URING_NAPI_TRACKING_STATIC = 1,
IO_URING_NAPI_TRACKING_INACTIVE = 255
};

/* argument for IORING_(UN)REGISTER_NAPI */
struct io_uring_napi {
__u32 busy_poll_to;
__u8 prefer_busy_poll;
__u8 pad[3];
__u64 resv;

/* a io_uring_napi_op value */
__u8 opcode;
__u8 pad[2];

/*
* for IO_URING_NAPI_REGISTER_OP, it is a
* io_uring_napi_tracking_strategy value.
*
* for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID
* it is the napi id to add/del from napi_list.
*/
__u32 op_param;
__u32 resv;
};
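Static NAPI tracking is selected through the extended io_uring_napi argument shown above. A sketch of switching a ring to static tracking and then pinning a specific NAPI ID follows; IORING_REGISTER_NAPI itself predates this series, and the two-step sequence here is an assumed usage pattern rather than documented ABI:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: switch a ring to static NAPI tracking and pin one NAPI ID to it. */
static int enable_static_napi(int ring_fd, unsigned napi_id)
{
	struct io_uring_napi napi;
	int ret;

	/* Step 1: select the static tracking strategy for this ring. */
	memset(&napi, 0, sizeof(napi));
	napi.opcode = IO_URING_NAPI_REGISTER_OP;
	napi.op_param = IO_URING_NAPI_TRACKING_STATIC;
	ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_NAPI,
		      &napi, 1);
	if (ret < 0)
		return ret;

	/* Step 2: with static tracking, NAPI IDs are added/removed explicitly. */
	memset(&napi, 0, sizeof(napi));
	napi.opcode = IO_URING_NAPI_STATIC_ADD_ID;
	napi.op_param = napi_id;
	return syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_NAPI,
		       &napi, 1);
}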
/*
@@ -795,6 +869,43 @@ enum io_uring_register_restriction_op {
IORING_RESTRICTION_LAST
};

enum {
IORING_REG_WAIT_TS = (1U << 0),
};

/*
* Argument for IORING_REGISTER_CQWAIT_REG, registering a region of
* struct io_uring_reg_wait that can be indexed when io_uring_enter(2) is
* called rather than pass in a wait argument structure separately.
*/
struct io_uring_cqwait_reg_arg {
__u32 flags;
__u32 struct_size;
__u32 nr_entries;
__u32 pad;
__u64 user_addr;
__u64 pad2[3];
};

/*
* Argument for io_uring_enter(2) with
* IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument
* is an index into a previously registered fixed wait region described by
* the below structure.
*/
struct io_uring_reg_wait {
struct __kernel_timespec ts;
__u32 min_wait_usec;
__u32 flags;
__u64 sigmask;
__u32 sigmask_sz;
__u32 pad[3];
__u64 pad2[2];
};

/*
* Argument for io_uring_enter(2) with IORING_GETEVENTS | IORING_ENTER_EXT_ARG
*/
struct io_uring_getevents_arg {
__u64 sigmask;
__u32 sigmask_sz;
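Registered wait arguments let the kernel pick up the timeout/sigmask description from the mapped region instead of copying a getevents argument on every wait. The sketch below assumes 'waits' points at the start of the registered region and that the slot index is passed as the extended-argument value to io_uring_enter(2); the exact enter-side convention should be checked against the final uapi:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: fill slot 0 of the registered wait region, then wait through it. */
static int wait_via_registered_slot(int ring_fd, struct io_uring_reg_wait *waits)
{
	struct io_uring_reg_wait *w = &waits[0];

	memset(w, 0, sizeof(*w));
	w->ts.tv_sec = 1;		/* 1 second timeout */
	w->ts.tv_nsec = 0;
	w->flags = IORING_REG_WAIT_TS;	/* the timespec in this slot is valid */

	/* Assumed convention: the "argument" passed to enter is the slot index. */
	return syscall(__NR_io_uring_enter, ring_fd, 0, 1,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG_REG,
		       (void *)0UL /* slot 0 */, sizeof(*w));
}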
@@ -205,7 +205,7 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
.opcode = cancel->opcode,
.seq = atomic_inc_return(&req->ctx->cancel_seq),
};
struct io_uring_task *tctx = req->task->io_uring;
struct io_uring_task *tctx = req->tctx;
int ret;

if (cd.flags & IORING_ASYNC_CANCEL_FD) {
@@ -232,16 +232,6 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}

void init_hash_table(struct io_hash_table *table, unsigned size)
{
unsigned int i;

for (i = 0; i < size; i++) {
spin_lock_init(&table->hbs[i].lock);
INIT_HLIST_HEAD(&table->hbs[i].list);
}
}

static int __io_sync_cancel(struct io_uring_task *tctx,
struct io_cancel_data *cd, int fd)
{
@@ -250,10 +240,12 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
/* fixed must be grabbed every time since we drop the uring_lock */
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
if (unlikely(fd >= ctx->nr_user_files))
struct io_rsrc_node *node;

node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
if (unlikely(!node))
return -EBADF;
fd = array_index_nospec(fd, ctx->nr_user_files);
cd->file = io_file_from_index(&ctx->file_table, fd);
cd->file = io_slot_file(node);
if (!cd->file)
return -EBADF;
}

@@ -20,7 +20,6 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);

int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
unsigned int issue_flags);
void init_hash_table(struct io_hash_table *table, unsigned size);

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
@@ -13,10 +13,12 @@

struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1;
struct rcu_head rcu;
unsigned int eventfd_async;
/* protected by ->completion_lock */
unsigned last_cq_tail;
refcount_t refs;
atomic_t ops;
struct rcu_head rcu;
};

enum {
@@ -41,14 +43,58 @@ static void io_eventfd_do_signal(struct rcu_head *rcu)
io_eventfd_free(rcu);
}

void io_eventfd_signal(struct io_ring_ctx *ctx)
static void io_eventfd_put(struct io_ev_fd *ev_fd)
{
struct io_ev_fd *ev_fd = NULL;
if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
}

static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref)
{
if (put_ref)
io_eventfd_put(ev_fd);
rcu_read_unlock();
}

/*
* Returns true if the caller should put the ev_fd reference, false if not.
*/
static bool __io_eventfd_signal(struct io_ev_fd *ev_fd)
{
if (eventfd_signal_allowed()) {
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
return true;
}
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
return false;
}
return true;
}

/*
* Trigger if eventfd_async isn't set, or if it's set and the caller is
* an async worker. If ev_fd isn't valid, obviously return false.
*/
static bool io_eventfd_trigger(struct io_ev_fd *ev_fd)
{
if (ev_fd)
return !ev_fd->eventfd_async || io_wq_current_is_worker();
return false;
}

/*
* On success, returns with an ev_fd reference grabbed and the RCU read
* lock held.
*/
static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx)
{
struct io_ev_fd *ev_fd;

if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
return;
return NULL;

guard(rcu)();
rcu_read_lock();

/*
* rcu_dereference ctx->io_ev_fd once and use it for both for checking
@@ -57,51 +103,53 @@ void io_eventfd_signal(struct io_ring_ctx *ctx)
ev_fd = rcu_dereference(ctx->io_ev_fd);

/*
* Check again if ev_fd exists incase an io_eventfd_unregister call
* Check again if ev_fd exists in case an io_eventfd_unregister call
* completed between the NULL check of ctx->io_ev_fd at the start of
* the function and rcu_read_lock.
*/
if (unlikely(!ev_fd))
return;
if (!refcount_inc_not_zero(&ev_fd->refs))
return;
if (ev_fd->eventfd_async && !io_wq_current_is_worker())
goto out;
if (io_eventfd_trigger(ev_fd) && refcount_inc_not_zero(&ev_fd->refs))
return ev_fd;

if (likely(eventfd_signal_allowed())) {
eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
} else {
if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
return;
}
}
out:
if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
rcu_read_unlock();
return NULL;
}
void io_eventfd_signal(struct io_ring_ctx *ctx)
{
struct io_ev_fd *ev_fd;

ev_fd = io_eventfd_grab(ctx);
if (ev_fd)
io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd));
}

void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
bool skip;
struct io_ev_fd *ev_fd;

spin_lock(&ctx->completion_lock);
ev_fd = io_eventfd_grab(ctx);
if (ev_fd) {
bool skip, put_ref = true;

/*
* Eventfd should only get triggered when at least one event has been
* posted. Some applications rely on the eventfd notification count
* only changing IFF a new CQE has been added to the CQ ring. There's
* no depedency on 1:1 relationship between how many times this
* function is called (and hence the eventfd count) and number of CQEs
* posted to the CQ ring.
*/
skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
spin_unlock(&ctx->completion_lock);
if (skip)
return;
/*
* Eventfd should only get triggered when at least one event
* has been posted. Some applications rely on the eventfd
* notification count only changing IFF a new CQE has been
* added to the CQ ring. There's no dependency on 1:1
* relationship between how many times this function is called
* (and hence the eventfd count) and number of CQEs posted to
* the CQ ring.
*/
spin_lock(&ctx->completion_lock);
skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
ev_fd->last_cq_tail = ctx->cached_cq_tail;
spin_unlock(&ctx->completion_lock);

io_eventfd_signal(ctx);
if (!skip)
put_ref = __io_eventfd_signal(ev_fd);

io_eventfd_release(ev_fd, put_ref);
}
}

int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
@@ -132,7 +180,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
}

spin_lock(&ctx->completion_lock);
ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
ev_fd->last_cq_tail = ctx->cached_cq_tail;
spin_unlock(&ctx->completion_lock);

ev_fd->eventfd_async = eventfd_async;
@@ -152,8 +200,7 @@ int io_eventfd_unregister(struct io_ring_ctx *ctx)
if (ev_fd) {
ctx->has_evfd = false;
rcu_assign_pointer(ctx->io_ev_fd, NULL);
if (refcount_dec_and_test(&ev_fd->refs))
call_rcu(&ev_fd->rcu, io_eventfd_free);
io_eventfd_put(ev_fd);
return 0;
}
@@ -46,6 +46,46 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
return 0;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
struct seq_file *m,
const char *tracking_strategy)
{
seq_puts(m, "NAPI:\tenabled\n");
seq_printf(m, "napi tracking:\t%s\n", tracking_strategy);
seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
if (ctx->napi_prefer_busy_poll)
seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
else
seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
}

static __cold void napi_show_fdinfo(struct io_ring_ctx *ctx,
struct seq_file *m)
{
unsigned int mode = READ_ONCE(ctx->napi_track_mode);

switch (mode) {
case IO_URING_NAPI_TRACKING_INACTIVE:
seq_puts(m, "NAPI:\tdisabled\n");
break;
case IO_URING_NAPI_TRACKING_DYNAMIC:
common_tracking_show_fdinfo(ctx, m, "dynamic");
break;
case IO_URING_NAPI_TRACKING_STATIC:
common_tracking_show_fdinfo(ctx, m, "static");
break;
default:
seq_printf(m, "NAPI:\tunknown mode (%u)\n", mode);
}
}
#else
static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
struct seq_file *m)
{
}
#endif

/*
* Caller holds a reference to the file already, we don't need to do
* anything else to get an extra reference.
@@ -165,20 +205,27 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) {
struct file *f = NULL;

if (ctx->file_table.data.nodes[i])
f = io_slot_file(ctx->file_table.data.nodes[i]);
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
else
seq_printf(m, "%5u: <none>\n", i);
}
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
struct io_mapped_ubuf *buf = NULL;

seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
if (ctx->buf_table.nodes[i])
buf = ctx->buf_table.nodes[i]->buf;
if (buf)
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
else
seq_printf(m, "%5u: <none>\n", i);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
@@ -190,22 +237,13 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
}

seq_puts(m, "PollList:\n");
for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) {
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
struct io_kiocb *req;

spin_lock(&hb->lock);
hlist_for_each_entry(req, &hb->list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
task_work_pending(req->task));
spin_unlock(&hb->lock);

if (!has_lock)
continue;
hlist_for_each_entry(req, &hbl->list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
task_work_pending(req->task));
task_work_pending(req->tctx->task));
}

if (has_lock)
@@ -221,18 +259,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)

}
spin_unlock(&ctx->completion_lock);

#ifdef CONFIG_NET_RX_BUSY_POLL
if (ctx->napi_enabled) {
seq_puts(m, "NAPI:\tenabled\n");
seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
if (ctx->napi_prefer_busy_poll)
seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
else
seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
} else {
seq_puts(m, "NAPI:\tdisabled\n");
}
#endif
napi_show_fdinfo(ctx, m);
}
#endif
@@ -36,27 +36,22 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
return -ENFILE;
}

bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table,
unsigned nr_files)
{
table->files = kvcalloc(nr_files, sizeof(table->files[0]),
GFP_KERNEL_ACCOUNT);
if (unlikely(!table->files))
if (io_rsrc_data_alloc(&table->data, nr_files))
return false;

table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
if (unlikely(!table->bitmap)) {
kvfree(table->files);
return false;
}

return true;
if (table->bitmap)
return true;
io_rsrc_data_free(ctx, &table->data);
return false;
}

void io_free_file_tables(struct io_file_table *table)
void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table)
{
kvfree(table->files);
io_rsrc_data_free(ctx, &table->data);
bitmap_free(table->bitmap);
table->files = NULL;
table->bitmap = NULL;
}

@@ -64,32 +59,24 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
u32 slot_index)
__must_hold(&req->ctx->uring_lock)
{
struct io_fixed_file *file_slot;
int ret;
struct io_rsrc_node *node;

if (io_is_uring_fops(file))
return -EBADF;
if (!ctx->file_data)
if (!ctx->file_table.data.nr)
return -ENXIO;
if (slot_index >= ctx->nr_user_files)
if (slot_index >= ctx->file_table.data.nr)
return -EINVAL;

slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
if (!node)
return -ENOMEM;

if (file_slot->file_ptr) {
ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
io_slot_file(file_slot));
if (ret)
return ret;

file_slot->file_ptr = 0;
} else {
if (!io_reset_rsrc_node(ctx, &ctx->file_table.data, slot_index))
io_file_bitmap_set(&ctx->file_table, slot_index);
}

*io_get_tag_slot(ctx->file_data, slot_index) = 0;
io_fixed_file_set(file_slot, file);
ctx->file_table.data.nodes[slot_index] = node;
io_fixed_file_set(node, file);
return 0;
}

@@ -134,25 +121,17 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,

int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
{
struct io_fixed_file *file_slot;
int ret;
struct io_rsrc_node *node;

if (unlikely(!ctx->file_data))
if (unlikely(!ctx->file_table.data.nr))
return -ENXIO;
if (offset >= ctx->nr_user_files)
if (offset >= ctx->file_table.data.nr)
return -EINVAL;

offset = array_index_nospec(offset, ctx->nr_user_files);
file_slot = io_fixed_file_slot(&ctx->file_table, offset);
if (!file_slot->file_ptr)
node = io_rsrc_node_lookup(&ctx->file_table.data, offset);
if (!node)
return -EBADF;

ret = io_queue_rsrc_removal(ctx->file_data, offset,
io_slot_file(file_slot));
if (ret)
return ret;

file_slot->file_ptr = 0;
io_reset_rsrc_node(ctx, &ctx->file_table.data, offset);
io_file_bitmap_clear(&ctx->file_table, offset);
return 0;
}
@@ -167,7 +146,7 @@ int io_register_file_alloc_range(struct io_ring_ctx *ctx,
return -EFAULT;
if (check_add_overflow(range.off, range.len, &end))
return -EOVERFLOW;
if (range.resv || end > ctx->nr_user_files)
if (range.resv || end > ctx->file_table.data.nr)
return -EINVAL;

io_file_table_set_alloc_range(ctx, range.off, range.len);
@@ -4,9 +4,10 @@

#include <linux/file.h>
#include <linux/io_uring_types.h>
#include "rsrc.h"

bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
void io_free_file_tables(struct io_file_table *table);
bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table, unsigned nr_files);
void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table);

int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
struct file *file, unsigned int file_slot);
@@ -33,50 +34,34 @@ static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
table->alloc_hint = bit + 1;
}

static inline struct io_fixed_file *
io_fixed_file_slot(struct io_file_table *table, unsigned i)
{
return &table->files[i];
}

#define FFS_NOWAIT 0x1UL
#define FFS_ISREG 0x2UL
#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)

static inline unsigned int io_slot_flags(struct io_fixed_file *slot)
static inline unsigned int io_slot_flags(struct io_rsrc_node *node)
{
return (slot->file_ptr & ~FFS_MASK) << REQ_F_SUPPORT_NOWAIT_BIT;

return (node->file_ptr & ~FFS_MASK) << REQ_F_SUPPORT_NOWAIT_BIT;
}

static inline struct file *io_slot_file(struct io_fixed_file *slot)
static inline struct file *io_slot_file(struct io_rsrc_node *node)
{
return (struct file *)(slot->file_ptr & FFS_MASK);
return (struct file *)(node->file_ptr & FFS_MASK);
}

static inline struct file *io_file_from_index(struct io_file_table *table,
int index)
{
return io_slot_file(io_fixed_file_slot(table, index));
}

static inline void io_fixed_file_set(struct io_fixed_file *file_slot,
static inline void io_fixed_file_set(struct io_rsrc_node *node,
struct file *file)
{
file_slot->file_ptr = (unsigned long)file |
node->file_ptr = (unsigned long)file |
(io_file_get_flags(file) >> REQ_F_SUPPORT_NOWAIT_BIT);
}

static inline void io_reset_alloc_hint(struct io_ring_ctx *ctx)
{
ctx->file_table.alloc_hint = ctx->file_alloc_start;
}

static inline void io_file_table_set_alloc_range(struct io_ring_ctx *ctx,
unsigned off, unsigned len)
{
ctx->file_alloc_start = off;
ctx->file_alloc_end = off + len;
io_reset_alloc_hint(ctx);
ctx->file_table.alloc_hint = ctx->file_alloc_start;
}

#endif
@@ -141,7 +141,7 @@ int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
return -ENOENT;
}

bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all)
{
struct hlist_node *tmp;
@@ -151,7 +151,7 @@ bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
lockdep_assert_held(&ctx->uring_lock);

hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) {
if (!io_match_task_safe(req, task, cancel_all))
if (!io_match_task_safe(req, tctx, cancel_all))
continue;
hlist_del_init(&req->hash_node);
__io_futex_cancel(ctx, req);

@@ -11,7 +11,7 @@ int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
#if defined(CONFIG_FUTEX)
int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
unsigned int issue_flags);
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all);
bool io_futex_cache_init(struct io_ring_ctx *ctx);
void io_futex_cache_free(struct io_ring_ctx *ctx);
@@ -23,7 +23,7 @@ static inline int io_futex_cancel(struct io_ring_ctx *ctx,
return 0;
}
static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
struct task_struct *task, bool cancel_all)
struct io_uring_task *tctx, bool cancel_all)
{
return false;
}
@@ -69,6 +69,7 @@
#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/jump_label.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
@@ -103,9 +104,6 @@
#include "alloc_cache.h"
#include "eventfd.h"

#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
IOSQE_IO_HARDLINK | IOSQE_ASYNC)

@@ -143,11 +141,13 @@ struct io_defer_entry {
#define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task,
struct io_uring_task *tctx,
bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray);

struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;

@@ -200,12 +200,12 @@ static bool io_match_linked(struct io_kiocb *head)
* As io_match_task() but protected against racing with linked timeouts.
* User must not hold timeout_lock.
*/
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
bool cancel_all)
{
bool matched;

if (task && head->task != task)
if (tctx && head->tctx != tctx)
return false;
if (cancel_all)
return true;
@@ -260,15 +260,23 @@ static __cold void io_fallback_req_func(struct work_struct *work)

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
unsigned hash_buckets = 1U << bits;
size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
unsigned int hash_buckets;
int i;

table->hbs = kmalloc(hash_size, GFP_KERNEL);
if (!table->hbs)
return -ENOMEM;
do {
hash_buckets = 1U << bits;
table->hbs = kvmalloc_array(hash_buckets, sizeof(table->hbs[0]),
GFP_KERNEL_ACCOUNT);
if (table->hbs)
break;
if (bits == 1)
return -ENOMEM;
bits--;
} while (1);

table->hash_bits = bits;
init_hash_table(table, hash_buckets);
for (i = 0; i < hash_buckets; i++)
INIT_HLIST_HEAD(&table->hbs[i].list);
return 0;
}

@@ -293,21 +301,18 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
hash_bits = clamp(hash_bits, 1, 8);
if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
goto err;
if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
goto err;
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
0, GFP_KERNEL))
goto err;

ctx->flags = p->flags;
ctx->hybrid_poll_time = LLONG_MAX;
atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
init_waitqueue_head(&ctx->sqo_sq_wait);
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
INIT_LIST_HEAD(&ctx->io_buffers_cache);
ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
sizeof(struct io_rsrc_node));
ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
sizeof(struct async_poll));
ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_async_msghdr));
@@ -326,7 +331,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->cq_wait);
init_waitqueue_head(&ctx->poll_wq);
init_waitqueue_head(&ctx->rsrc_quiesce_wq);
spin_lock_init(&ctx->completion_lock);
spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list);
@@ -334,7 +338,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
INIT_LIST_HEAD(&ctx->ltimeout_list);
INIT_LIST_HEAD(&ctx->rsrc_ref_list);
init_llist_head(&ctx->work_llist);
INIT_LIST_HEAD(&ctx->tctx_list);
ctx->submit_state.free_list.next = NULL;
@@ -346,21 +349,20 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
io_napi_init(ctx);
mutex_init(&ctx->resize_lock);

return ctx;

free_ref:
percpu_ref_exit(&ctx->refs);
err:
io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
io_alloc_cache_free(&ctx->apoll_cache, kfree);
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
io_alloc_cache_free(&ctx->uring_cache, kfree);
io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
io_futex_cache_free(ctx);
kfree(ctx->cancel_table.hbs);
kfree(ctx->cancel_table_locked.hbs);
kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
return NULL;
@@ -404,11 +406,8 @@ static void io_clean_op(struct io_kiocb *req)
kfree(req->apoll);
req->apoll = NULL;
}
if (req->flags & REQ_F_INFLIGHT) {
struct io_uring_task *tctx = req->task->io_uring;

atomic_dec(&tctx->inflight_tracked);
}
if (req->flags & REQ_F_INFLIGHT)
atomic_dec(&req->tctx->inflight_tracked);
if (req->flags & REQ_F_CREDS)
put_cred(req->creds);
if (req->flags & REQ_F_ASYNC_DATA) {
@@ -422,7 +421,7 @@ static inline void io_req_track_inflight(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_INFLIGHT)) {
req->flags |= REQ_F_INFLIGHT;
atomic_inc(&req->task->io_uring->inflight_tracked);
atomic_inc(&req->tctx->inflight_tracked);
}
}

@@ -511,7 +510,7 @@ static void io_prep_async_link(struct io_kiocb *req)
static void io_queue_iowq(struct io_kiocb *req)
{
struct io_kiocb *link = io_prep_linked_timeout(req);
struct io_uring_task *tctx = req->task->io_uring;
struct io_uring_task *tctx = req->tctx;

BUG_ON(!tctx);
BUG_ON(!tctx->io_wq);
@@ -526,7 +525,7 @@ static void io_queue_iowq(struct io_kiocb *req)
* procedure rather than attempt to run this request (or create a new
* worker for it).
*/
if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))
atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);

trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
@@ -674,30 +673,19 @@ static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
mutex_unlock(&ctx->uring_lock);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task)
{
struct io_uring_task *tctx = task->io_uring;

percpu_counter_sub(&tctx->inflight, 1);
if (unlikely(atomic_read(&tctx->in_cancel)))
wake_up(&tctx->wait);
put_task_struct(task);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task)
{
task->io_uring->cached_refs++;
}

/* must to be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task)
static inline void io_put_task(struct io_kiocb *req)
{
if (likely(task == current))
io_put_task_local(task);
else
io_put_task_remote(task);
struct io_uring_task *tctx = req->tctx;

if (likely(tctx->task == current)) {
tctx->cached_refs++;
} else {
percpu_counter_sub(&tctx->inflight, 1);
if (unlikely(atomic_read(&tctx->in_cancel)))
wake_up(&tctx->wait);
put_task_struct(tctx->task);
}
}

void io_task_refs_refill(struct io_uring_task *tctx)
@@ -819,8 +807,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
* the ring.
*/
if (likely(io_get_cqe(ctx, &cqe))) {
trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

WRITE_ONCE(cqe->user_data, user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, cflags);
@@ -829,6 +815,8 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
WRITE_ONCE(cqe->big_cqe[0], 0);
WRITE_ONCE(cqe->big_cqe[1], 0);
}

trace_io_uring_complete(ctx, NULL, cqe);
return true;
}
return false;
@@ -945,6 +933,8 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
req->ctx = ctx;
req->buf_node = NULL;
req->file_node = NULL;
req->link = NULL;
req->async_data = NULL;
/* not necessary, but safer to zero */
@@ -1075,23 +1065,8 @@ struct llist_node *io_handle_tw_list(struct llist_node *node,
return node;
}

/**
* io_llist_xchg - swap all entries in a lock-less list
* @head: the head of lock-less list to delete all entries
* @new: new entry as the head of the list
*
* If list is empty, return NULL, otherwise, return the pointer to the first entry.
* The order of entries returned is from the newest to the oldest added one.
*/
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
struct llist_node *new)
static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
{
return xchg(&head->first, new);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
struct llist_node *node = llist_del_all(&tctx->task_list);
struct io_ring_ctx *last_ctx = NULL;
struct io_kiocb *req;

@@ -1117,6 +1092,13 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
}
}

static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
struct llist_node *node = llist_del_all(&tctx->task_list);

__io_fallback_tw(node, sync);
}

struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
unsigned int max_entries,
unsigned int *count)
@@ -1227,7 +1209,7 @@ static inline void io_req_local_work_add(struct io_kiocb *req,

static void io_req_normal_work_add(struct io_kiocb *req)
{
struct io_uring_task *tctx = req->task->io_uring;
struct io_uring_task *tctx = req->tctx;
struct io_ring_ctx *ctx = req->ctx;

/* task_work already pending, we're done */
@@ -1246,7 +1228,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
return;
}

if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
if (likely(!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method)))
return;

io_fallback_tw(tctx, false);
@@ -1270,16 +1252,9 @@ void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
struct llist_node *node;
struct llist_node *node = llist_del_all(&ctx->work_llist);

node = llist_del_all(&ctx->work_llist);
while (node) {
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);

node = node->next;
io_req_normal_work_add(req);
}
__io_fallback_tw(node, false);
}

static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
@@ -1310,7 +1285,7 @@ static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
* llists are in reverse order, flip it back the right way before
* running the pending items.
*/
node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
node = llist_reverse_order(llist_del_all(&ctx->work_llist));
while (node) {
struct llist_node *next = node->next;
struct io_kiocb *req = container_of(node, struct io_kiocb,
@@ -1363,8 +1338,7 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
io_tw_lock(req->ctx, ts);
/* req->task == current here, checking PF_EXITING is safe */
if (unlikely(req->task->flags & PF_EXITING))
if (unlikely(io_should_terminate_tw()))
io_req_defer_failed(req, -EFAULT);
else if (req->flags & REQ_F_FORCE_ASYNC)
io_queue_iowq(req);
@@ -1422,8 +1396,8 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
io_clean_op(req);
}
io_put_file(req);
io_put_rsrc_node(ctx, req->rsrc_node);
io_put_task(req->task);
io_req_put_rsrc_nodes(req);
io_put_task(req);

node = req->comp_list.next;
io_req_add_to_cache(req, ctx);
@@ -1885,20 +1859,16 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_fixed_file *slot;
struct io_rsrc_node *node;
struct file *file = NULL;

io_ring_submit_lock(ctx, issue_flags);

if (unlikely((unsigned int)fd >= ctx->nr_user_files))
goto out;
fd = array_index_nospec(fd, ctx->nr_user_files);
slot = io_fixed_file_slot(&ctx->file_table, fd);
if (!req->rsrc_node)
__io_req_set_rsrc_node(req, ctx);
req->flags |= io_slot_flags(slot);
file = io_slot_file(slot);
out:
node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
if (node) {
io_req_assign_rsrc_node(&req->file_node, node);
req->flags |= io_slot_flags(node);
file = io_slot_file(node);
}
io_ring_submit_unlock(ctx, issue_flags);
return file;
}
@@ -2043,8 +2013,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->flags = (__force io_req_flags_t) sqe_flags;
req->cqe.user_data = READ_ONCE(sqe->user_data);
req->file = NULL;
req->rsrc_node = NULL;
req->task = current;
req->tctx = current->io_uring;
req->cancel_seq_set = false;

if (unlikely(opcode >= IORING_OP_LAST)) {
@@ -2262,7 +2231,8 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
unsigned mask = ctx->sq_entries - 1;
unsigned head = ctx->cached_sq_head++ & mask;

if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
if (static_branch_unlikely(&io_key_has_sqarray) &&
(!(ctx->flags & IORING_SETUP_NO_SQARRAY))) {
head = READ_ONCE(ctx->sq_array[head]);
if (unlikely(head >= ctx->sq_entries)) {
/* drop invalid entries */
@@ -2273,6 +2243,7 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
READ_ONCE(ctx->rings->sq_dropped) + 1);
return false;
}
head = array_index_nospec(head, ctx->sq_entries);
}

/*
@@ -2501,9 +2472,10 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,

struct ext_arg {
size_t argsz;
struct __kernel_timespec __user *ts;
struct timespec64 ts;
const sigset_t __user *sig;
ktime_t min_time;
bool ts_set;
};

/*
@@ -2541,13 +2513,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
iowq.timeout = KTIME_MAX;
start_time = io_get_time(ctx);

if (ext_arg->ts) {
struct timespec64 ts;

if (get_timespec64(&ts, ext_arg->ts))
return -EFAULT;

iowq.timeout = timespec64_to_ktime(ts);
if (ext_arg->ts_set) {
iowq.timeout = timespec64_to_ktime(ext_arg->ts);
if (!(flags & IORING_ENTER_ABS_TIMER))
iowq.timeout = ktime_add(iowq.timeout, start_time);
}
@@ -2671,8 +2638,8 @@ static void io_rings_free(struct io_ring_ctx *ctx)
ctx->sq_sqes = NULL;
}

static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
unsigned int cq_entries, size_t *sq_offset)
unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
unsigned int cq_entries, size_t *sq_offset)
{
struct io_rings *rings;
size_t off, sq_array_size;
@@ -2680,7 +2647,7 @@ static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries
off = struct_size(rings, cqes, cq_entries);
if (off == SIZE_MAX)
return SIZE_MAX;
if (ctx->flags & IORING_SETUP_CQE32) {
if (flags & IORING_SETUP_CQE32) {
if (check_shl_overflow(off, 1, &off))
return SIZE_MAX;
}
@@ -2691,7 +2658,7 @@ static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries
return SIZE_MAX;
#endif

if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
if (flags & IORING_SETUP_NO_SQARRAY) {
*sq_offset = SIZE_MAX;
return off;
}
@@ -2728,15 +2695,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_sq_thread_finish(ctx);
/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
return;

mutex_lock(&ctx->uring_lock);
if (ctx->buf_data)
__io_sqe_buffers_unregister(ctx);
if (ctx->file_data)
__io_sqe_files_unregister(ctx);
io_sqe_buffers_unregister(ctx);
io_sqe_files_unregister(ctx);
io_cqring_overflow_kill(ctx);
io_eventfd_unregister(ctx);
io_alloc_cache_free(&ctx->apoll_cache, kfree);
@@ -2746,34 +2708,31 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
io_futex_cache_free(ctx);
io_destroy_buffers(ctx);
io_free_region(ctx, &ctx->param_region);
mutex_unlock(&ctx->uring_lock);
if (ctx->sq_creds)
put_cred(ctx->sq_creds);
if (ctx->submitter_task)
put_task_struct(ctx->submitter_task);

/* there are no registered resources left, nobody uses it */
if (ctx->rsrc_node)
io_rsrc_node_destroy(ctx, ctx->rsrc_node);

WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
|
||||
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
|
||||
|
||||
io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
|
||||
if (ctx->mm_account) {
|
||||
mmdrop(ctx->mm_account);
|
||||
ctx->mm_account = NULL;
|
||||
}
|
||||
io_rings_free(ctx);
|
||||
|
||||
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
|
||||
static_branch_dec(&io_key_has_sqarray);
|
||||
|
||||
percpu_ref_exit(&ctx->refs);
|
||||
free_uid(ctx->user);
|
||||
io_req_caches_free(ctx);
|
||||
if (ctx->hash_map)
|
||||
io_wq_put_hash(ctx->hash_map);
|
||||
io_napi_free(ctx);
|
||||
kfree(ctx->cancel_table.hbs);
|
||||
kfree(ctx->cancel_table_locked.hbs);
|
||||
kvfree(ctx->cancel_table.hbs);
|
||||
xa_destroy(&ctx->io_bl_xa);
|
||||
kfree(ctx);
|
||||
}
|
||||
@ -3012,7 +2971,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
|
||||
}
|
||||
|
||||
struct io_task_cancel {
|
||||
struct task_struct *task;
|
||||
struct io_uring_task *tctx;
|
||||
bool all;
|
||||
};
|
||||
|
||||
@ -3021,11 +2980,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
|
||||
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
|
||||
struct io_task_cancel *cancel = data;
|
||||
|
||||
return io_match_task_safe(req, cancel->task, cancel->all);
|
||||
return io_match_task_safe(req, cancel->tctx, cancel->all);
|
||||
}
|
||||
|
||||
static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
|
||||
struct task_struct *task,
|
||||
struct io_uring_task *tctx,
|
||||
bool cancel_all)
|
||||
{
|
||||
struct io_defer_entry *de;
|
||||
@ -3033,7 +2992,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
|
||||
|
||||
spin_lock(&ctx->completion_lock);
|
||||
list_for_each_entry_reverse(de, &ctx->defer_list, list) {
|
||||
if (io_match_task_safe(de->req, task, cancel_all)) {
|
||||
if (io_match_task_safe(de->req, tctx, cancel_all)) {
|
||||
list_cut_position(&list, &ctx->defer_list, &de->list);
|
||||
break;
|
||||
}
|
||||
@ -3076,11 +3035,10 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
|
||||
}
|
||||
|
||||
static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
|
||||
struct task_struct *task,
|
||||
struct io_uring_task *tctx,
|
||||
bool cancel_all)
|
||||
{
|
||||
struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
|
||||
struct io_uring_task *tctx = task ? task->io_uring : NULL;
|
||||
struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
|
||||
enum io_wq_cancel cret;
|
||||
bool ret = false;
|
||||
|
||||
@ -3094,9 +3052,9 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
|
||||
if (!ctx->rings)
|
||||
return false;
|
||||
|
||||
if (!task) {
|
||||
if (!tctx) {
|
||||
ret |= io_uring_try_cancel_iowq(ctx);
|
||||
} else if (tctx && tctx->io_wq) {
|
||||
} else if (tctx->io_wq) {
|
||||
/*
|
||||
* Cancels requests of all rings, not only @ctx, but
|
||||
* it's fine as the task is in exit/exec.
|
||||
@ -3119,15 +3077,15 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
|
||||
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
|
||||
io_allowed_defer_tw_run(ctx))
|
||||
ret |= io_run_local_work(ctx, INT_MAX) > 0;
|
||||
ret |= io_cancel_defer_files(ctx, task, cancel_all);
|
||||
ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
ret |= io_poll_remove_all(ctx, task, cancel_all);
|
||||
ret |= io_waitid_remove_all(ctx, task, cancel_all);
|
||||
ret |= io_futex_remove_all(ctx, task, cancel_all);
|
||||
ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
|
||||
ret |= io_poll_remove_all(ctx, tctx, cancel_all);
|
||||
ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
|
||||
ret |= io_futex_remove_all(ctx, tctx, cancel_all);
|
||||
ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
ret |= io_kill_timeouts(ctx, task, cancel_all);
|
||||
if (task)
|
||||
ret |= io_kill_timeouts(ctx, tctx, cancel_all);
|
||||
if (tctx)
|
||||
ret |= io_run_task_work() > 0;
|
||||
else
|
||||
ret |= flush_delayed_work(&ctx->fallback_work);
|
||||
@ -3180,12 +3138,13 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
|
||||
if (node->ctx->sq_data)
|
||||
continue;
|
||||
loop |= io_uring_try_cancel_requests(node->ctx,
|
||||
current, cancel_all);
|
||||
current->io_uring,
|
||||
cancel_all);
|
||||
}
|
||||
} else {
|
||||
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
|
||||
loop |= io_uring_try_cancel_requests(ctx,
|
||||
current,
|
||||
current->io_uring,
|
||||
cancel_all);
|
||||
}
|
||||
|
||||
@ -3232,22 +3191,44 @@ void __io_uring_cancel(bool cancel_all)
|
||||
io_uring_cancel_generic(cancel_all, NULL);
|
||||
}
|
||||
|
||||
static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
			const struct io_uring_getevents_arg __user *uarg)
{
	if (flags & IORING_ENTER_EXT_ARG) {
		struct io_uring_getevents_arg arg;
	unsigned long size = sizeof(struct io_uring_reg_wait);
	unsigned long offset = (uintptr_t)uarg;
	unsigned long end;

		if (argsz != sizeof(arg))
			return -EINVAL;
		if (copy_from_user(&arg, argp, sizeof(arg)))
			return -EFAULT;
	}
	if (unlikely(offset % sizeof(long)))
		return ERR_PTR(-EFAULT);

	/* also protects from NULL ->cq_wait_arg as the size would be 0 */
	if (unlikely(check_add_overflow(offset, size, &end) ||
		     end > ctx->cq_wait_size))
		return ERR_PTR(-EFAULT);

	return ctx->cq_wait_arg + offset;
}

static int io_validate_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
			       const void __user *argp, size_t argsz)
{
	struct io_uring_getevents_arg arg;

	if (!(flags & IORING_ENTER_EXT_ARG))
		return 0;
	if (flags & IORING_ENTER_EXT_ARG_REG)
		return -EINVAL;
	if (argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	return 0;
}

static int io_get_ext_arg(unsigned flags, const void __user *argp,
			  struct ext_arg *ext_arg)
static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
			  const void __user *argp, struct ext_arg *ext_arg)
{
	const struct io_uring_getevents_arg __user *uarg = argp;
	struct io_uring_getevents_arg arg;

	/*
@@ -3256,7 +3237,28 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp,
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		ext_arg->sig = (const sigset_t __user *) argp;
		ext_arg->ts = NULL;
		return 0;
	}

	if (flags & IORING_ENTER_EXT_ARG_REG) {
		struct io_uring_reg_wait *w;

		if (ext_arg->argsz != sizeof(struct io_uring_reg_wait))
			return -EINVAL;
		w = io_get_ext_arg_reg(ctx, argp);
		if (IS_ERR(w))
			return PTR_ERR(w);

		if (w->flags & ~IORING_REG_WAIT_TS)
			return -EINVAL;
		ext_arg->min_time = READ_ONCE(w->min_wait_usec) * NSEC_PER_USEC;
		ext_arg->sig = u64_to_user_ptr(READ_ONCE(w->sigmask));
		ext_arg->argsz = READ_ONCE(w->sigmask_sz);
		if (w->flags & IORING_REG_WAIT_TS) {
			ext_arg->ts.tv_sec = READ_ONCE(w->ts.tv_sec);
			ext_arg->ts.tv_nsec = READ_ONCE(w->ts.tv_nsec);
			ext_arg->ts_set = true;
		}
		return 0;
	}

@@ -3266,13 +3268,32 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp,
	 */
	if (ext_arg->argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
#ifdef CONFIG_64BIT
	if (!user_access_begin(uarg, sizeof(*uarg)))
		return -EFAULT;
	unsafe_get_user(arg.sigmask, &uarg->sigmask, uaccess_end);
	unsafe_get_user(arg.sigmask_sz, &uarg->sigmask_sz, uaccess_end);
	unsafe_get_user(arg.min_wait_usec, &uarg->min_wait_usec, uaccess_end);
	unsafe_get_user(arg.ts, &uarg->ts, uaccess_end);
	user_access_end();
#else
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;
#endif
	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
	ext_arg->argsz = arg.sigmask_sz;
	ext_arg->ts = u64_to_user_ptr(arg.ts);
	if (arg.ts) {
		if (get_timespec64(&ext_arg->ts, u64_to_user_ptr(arg.ts)))
			return -EFAULT;
		ext_arg->ts_set = true;
	}
	return 0;
#ifdef CONFIG_64BIT
uaccess_end:
	user_access_end();
	return -EFAULT;
#endif
}
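
For reference, a rough userspace sketch of driving the fixed wait region path handled above. This is an illustration only, not part of this diff: it assumes the wait region holding struct io_uring_reg_wait entries has already been registered with the ring (that registration step lives elsewhere in this series) and that entry 0 of the region is used, with the raw syscall standing in for any library helper.

/*
 * Illustrative sketch only (assumed usage, not from this patch): wait for
 * completions using a pre-registered wait argument instead of copying a
 * getevents arg on every enter.  'w' must point into the registered wait
 * region; the enter "arg" is then the byte offset of that entry.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int wait_via_reg_region(int ring_fd, struct io_uring_reg_wait *w,
			       unsigned int min_complete)
{
	memset(w, 0, sizeof(*w));
	w->flags = IORING_REG_WAIT_TS;	/* use the embedded timeout below */
	w->ts.tv_sec = 1;
	w->ts.tv_nsec = 0;
	w->min_wait_usec = 0;		/* no minimum wait */
	w->sigmask = 0;			/* no signal mask, sigmask_sz stays 0 */

	/*
	 * With IORING_ENTER_EXT_ARG_REG the arg pointer is interpreted as a
	 * byte offset into the registered region (0 here, the first entry),
	 * and argsz must be sizeof(struct io_uring_reg_wait).
	 */
	return syscall(__NR_io_uring_enter, ring_fd, 0, min_complete,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG |
		       IORING_ENTER_EXT_ARG_REG,
		       (unsigned long)0, sizeof(*w));
}
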
|
||||
|
||||
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
|
||||
@ -3286,7 +3307,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
|
||||
if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
|
||||
IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
|
||||
IORING_ENTER_REGISTERED_RING |
|
||||
IORING_ENTER_ABS_TIMER)))
|
||||
IORING_ENTER_ABS_TIMER |
|
||||
IORING_ENTER_EXT_ARG_REG)))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
@ -3369,7 +3391,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
|
||||
*/
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
iopoll_locked:
|
||||
ret2 = io_validate_ext_arg(flags, argp, argsz);
|
||||
ret2 = io_validate_ext_arg(ctx, flags, argp, argsz);
|
||||
if (likely(!ret2)) {
|
||||
min_complete = min(min_complete,
|
||||
ctx->cq_entries);
|
||||
@ -3379,7 +3401,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
|
||||
} else {
|
||||
struct ext_arg ext_arg = { .argsz = argsz };
|
||||
|
||||
ret2 = io_get_ext_arg(flags, argp, &ext_arg);
|
||||
ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg);
|
||||
if (likely(!ret2)) {
|
||||
min_complete = min(min_complete,
|
||||
ctx->cq_entries);
|
||||
@ -3436,7 +3458,8 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
|
||||
ctx->sq_entries = p->sq_entries;
|
||||
ctx->cq_entries = p->cq_entries;
|
||||
|
||||
size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
|
||||
size = rings_size(ctx->flags, p->sq_entries, p->cq_entries,
|
||||
&sq_array_offset);
|
||||
if (size == SIZE_MAX)
|
||||
return -EOVERFLOW;
|
||||
|
||||
@ -3502,14 +3525,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
|
||||
O_RDWR | O_CLOEXEC, NULL);
|
||||
}
|
||||
|
||||
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
struct io_uring_params __user *params)
|
||||
int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
|
||||
{
|
||||
struct io_ring_ctx *ctx;
|
||||
struct io_uring_task *tctx;
|
||||
struct file *file;
|
||||
int ret;
|
||||
|
||||
if (!entries)
|
||||
return -EINVAL;
|
||||
if (entries > IORING_MAX_ENTRIES) {
|
||||
@ -3551,6 +3568,42 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
p->cq_entries = 2 * p->sq_entries;
|
||||
}
|
||||
|
||||
p->sq_off.head = offsetof(struct io_rings, sq.head);
|
||||
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
|
||||
p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
|
||||
p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
|
||||
p->sq_off.flags = offsetof(struct io_rings, sq_flags);
|
||||
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
|
||||
p->sq_off.resv1 = 0;
|
||||
if (!(p->flags & IORING_SETUP_NO_MMAP))
|
||||
p->sq_off.user_addr = 0;
|
||||
|
||||
p->cq_off.head = offsetof(struct io_rings, cq.head);
|
||||
p->cq_off.tail = offsetof(struct io_rings, cq.tail);
|
||||
p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
|
||||
p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
|
||||
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
|
||||
p->cq_off.cqes = offsetof(struct io_rings, cqes);
|
||||
p->cq_off.flags = offsetof(struct io_rings, cq_flags);
|
||||
p->cq_off.resv1 = 0;
|
||||
if (!(p->flags & IORING_SETUP_NO_MMAP))
|
||||
p->cq_off.user_addr = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
struct io_uring_params __user *params)
|
||||
{
|
||||
struct io_ring_ctx *ctx;
|
||||
struct io_uring_task *tctx;
|
||||
struct file *file;
|
||||
int ret;
|
||||
|
||||
ret = io_uring_fill_params(entries, p);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
ctx = io_ring_ctx_alloc(p);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
@ -3558,6 +3611,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
ctx->clockid = CLOCK_MONOTONIC;
|
||||
ctx->clock_offset = 0;
|
||||
|
||||
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
|
||||
static_branch_inc(&io_key_has_sqarray);
|
||||
|
||||
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
|
||||
!(ctx->flags & IORING_SETUP_IOPOLL) &&
|
||||
!(ctx->flags & IORING_SETUP_SQPOLL))
|
||||
@ -3608,6 +3664,11 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
ctx->notify_method = TWA_SIGNAL;
|
||||
}
|
||||
|
||||
/* HYBRID_IOPOLL only valid with IOPOLL */
|
||||
if ((ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_HYBRID_IOPOLL)) ==
|
||||
IORING_SETUP_HYBRID_IOPOLL)
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* For DEFER_TASKRUN we require the completion task to be the same as the
|
||||
* submission task. This implies that there is only one submitter, so enforce
|
||||
@ -3631,37 +3692,13 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (!(p->flags & IORING_SETUP_NO_SQARRAY))
|
||||
p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
|
||||
|
||||
ret = io_sq_offload_create(ctx, p);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = io_rsrc_init(ctx);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
p->sq_off.head = offsetof(struct io_rings, sq.head);
|
||||
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
|
||||
p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
|
||||
p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
|
||||
p->sq_off.flags = offsetof(struct io_rings, sq_flags);
|
||||
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
|
||||
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
|
||||
p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
|
||||
p->sq_off.resv1 = 0;
|
||||
if (!(ctx->flags & IORING_SETUP_NO_MMAP))
|
||||
p->sq_off.user_addr = 0;
|
||||
|
||||
p->cq_off.head = offsetof(struct io_rings, cq.head);
|
||||
p->cq_off.tail = offsetof(struct io_rings, cq.tail);
|
||||
p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
|
||||
p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
|
||||
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
|
||||
p->cq_off.cqes = offsetof(struct io_rings, cqes);
|
||||
p->cq_off.flags = offsetof(struct io_rings, cq_flags);
|
||||
p->cq_off.resv1 = 0;
|
||||
if (!(ctx->flags & IORING_SETUP_NO_MMAP))
|
||||
p->cq_off.user_addr = 0;
|
||||
|
||||
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
|
||||
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
|
||||
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
|
||||
@ -3737,7 +3774,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
|
||||
IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
|
||||
IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
|
||||
IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
|
||||
IORING_SETUP_NO_SQARRAY))
|
||||
IORING_SETUP_NO_SQARRAY | IORING_SETUP_HYBRID_IOPOLL))
|
||||
return -EINVAL;
|
||||
|
||||
return io_uring_create(entries, &p, params);
|
||||
@ -3775,6 +3812,8 @@ static int __init io_uring_init(void)
|
||||
struct kmem_cache_args kmem_args = {
|
||||
.useroffset = offsetof(struct io_kiocb, cmd.data),
|
||||
.usersize = sizeof_field(struct io_kiocb, cmd.data),
|
||||
.freeptr_offset = offsetof(struct io_kiocb, work),
|
||||
.use_freeptr_offset = true,
|
||||
};
|
||||
|
||||
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
|
||||
|
@ -65,6 +65,12 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
|
||||
return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
|
||||
}
|
||||
|
||||
#define IORING_MAX_ENTRIES 32768
|
||||
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
|
||||
|
||||
unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
|
||||
unsigned int cq_entries, size_t *sq_offset);
|
||||
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
|
||||
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
|
||||
int io_run_task_work_sig(struct io_ring_ctx *ctx);
|
||||
void io_req_defer_failed(struct io_kiocb *req, s32 res);
|
||||
@ -109,7 +115,7 @@ void io_queue_next(struct io_kiocb *req);
|
||||
void io_task_refs_refill(struct io_uring_task *tctx);
|
||||
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
|
||||
|
||||
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
|
||||
bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
|
||||
bool cancel_all);
|
||||
|
||||
void io_activate_pollwq(struct io_ring_ctx *ctx);
|
||||
@ -130,7 +136,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
|
||||
* Not from an SQE, as those cannot be submitted, but via
|
||||
* updating tagged resources.
|
||||
*/
|
||||
if (ctx->submitter_task->flags & PF_EXITING)
|
||||
if (percpu_ref_is_dying(&ctx->refs))
|
||||
lockdep_assert(current_work());
|
||||
else
|
||||
lockdep_assert(current == ctx->submitter_task);
|
||||
@ -189,16 +195,15 @@ static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
|
||||
if (unlikely(!io_get_cqe(ctx, &cqe)))
|
||||
return false;
|
||||
|
||||
if (trace_io_uring_complete_enabled())
|
||||
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
|
||||
req->cqe.res, req->cqe.flags,
|
||||
req->big_cqe.extra1, req->big_cqe.extra2);
|
||||
|
||||
memcpy(cqe, &req->cqe, sizeof(*cqe));
|
||||
if (ctx->flags & IORING_SETUP_CQE32) {
|
||||
memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
|
||||
memset(&req->big_cqe, 0, sizeof(req->big_cqe));
|
||||
}
|
||||
|
||||
if (trace_io_uring_complete_enabled())
|
||||
trace_io_uring_complete(req->ctx, req, cqe);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -421,6 +426,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
|
||||
ctx->submitter_task == current);
|
||||
}
|
||||
|
||||
/*
|
||||
* Terminate the request if either of these conditions are true:
|
||||
*
|
||||
* 1) It's being executed by the original task, but that task is marked
|
||||
* with PF_EXITING as it's exiting.
|
||||
* 2) PF_KTHREAD is set, in which case the invoker of the task_work is
|
||||
* our fallback task_work.
|
||||
*/
|
||||
static inline bool io_should_terminate_tw(void)
|
||||
{
|
||||
return current->flags & (PF_KTHREAD | PF_EXITING);
|
||||
}
|
||||
|
||||
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
|
||||
{
|
||||
io_req_set_res(req, res, 0);
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include "memmap.h"
|
||||
#include "kbuf.h"
|
||||
#include "rsrc.h"
|
||||
|
||||
static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
|
||||
size_t size, gfp_t gfp)
|
||||
@ -140,6 +141,8 @@ struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
|
||||
nr_pages = end - start;
|
||||
if (WARN_ON_ONCE(!nr_pages))
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (WARN_ON_ONCE(nr_pages > INT_MAX))
|
||||
return ERR_PTR(-EOVERFLOW);
|
||||
|
||||
pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
|
||||
if (!pages)
|
||||
@ -192,6 +195,74 @@ void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
	if (mr->pages) {
		unpin_user_pages(mr->pages, mr->nr_pages);
		kvfree(mr->pages);
	}
	if (mr->vmap_ptr)
		vunmap(mr->vmap_ptr);
	if (mr->nr_pages && ctx->user)
		__io_unaccount_mem(ctx->user, mr->nr_pages);

	memset(mr, 0, sizeof(*mr));
}

int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg)
{
	int pages_accounted = 0;
	struct page **pages;
	int nr_pages, ret;
	void *vptr;
	u64 end;

	if (WARN_ON_ONCE(mr->pages || mr->vmap_ptr || mr->nr_pages))
		return -EFAULT;
	if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
		return -EINVAL;
	if (reg->flags != IORING_MEM_REGION_TYPE_USER)
		return -EINVAL;
	if (!reg->user_addr)
		return -EFAULT;
	if (!reg->size || reg->mmap_offset || reg->id)
		return -EINVAL;
	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
		return -E2BIG;
	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
		return -EINVAL;
	if (check_add_overflow(reg->user_addr, reg->size, &end))
		return -EOVERFLOW;

	pages = io_pin_pages(reg->user_addr, reg->size, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			goto out_free;
		pages_accounted = nr_pages;
	}

	vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vptr) {
		ret = -ENOMEM;
		goto out_free;
	}

	mr->pages = pages;
	mr->vmap_ptr = vptr;
	mr->nr_pages = nr_pages;
	return 0;
out_free:
	if (pages_accounted)
		__io_unaccount_mem(ctx->user, pages_accounted);
	io_pages_free(&pages, nr_pages);
	return ret;
}
|
||||
|
||||
static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
|
||||
size_t sz)
|
||||
{
|
||||
@ -204,11 +275,15 @@ static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
|
||||
/* Don't allow mmap if the ring was setup without it */
|
||||
if (ctx->flags & IORING_SETUP_NO_MMAP)
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (!ctx->rings)
|
||||
return ERR_PTR(-EFAULT);
|
||||
return ctx->rings;
|
||||
case IORING_OFF_SQES:
|
||||
/* Don't allow mmap if the ring was setup without it */
|
||||
if (ctx->flags & IORING_SETUP_NO_MMAP)
|
||||
return ERR_PTR(-EINVAL);
|
||||
if (!ctx->sq_sqes)
|
||||
return ERR_PTR(-EFAULT);
|
||||
return ctx->sq_sqes;
|
||||
case IORING_OFF_PBUF_RING: {
|
||||
struct io_buffer_list *bl;
|
||||
@ -247,6 +322,8 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
unsigned int npages;
|
||||
void *ptr;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
@ -270,6 +347,7 @@ unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct io_ring_ctx *ctx = filp->private_data;
|
||||
void *ptr;
|
||||
|
||||
/*
|
||||
@ -280,6 +358,8 @@ unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
if (addr)
|
||||
return -EINVAL;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(filp, pgoff, len);
|
||||
if (IS_ERR(ptr))
|
||||
return -ENOMEM;
|
||||
@ -325,8 +405,11 @@ unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
|
||||
unsigned long len, unsigned long pgoff,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct io_ring_ctx *ctx = file->private_data;
|
||||
void *ptr;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(file, pgoff, len);
|
||||
if (IS_ERR(ptr))
|
||||
return PTR_ERR(ptr);
|
||||
|
@ -22,4 +22,18 @@ unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
|
||||
unsigned long flags);
|
||||
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
|
||||
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr);
|
||||
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
|
||||
struct io_uring_region_desc *reg);
|
||||
|
||||
static inline void *io_region_get_ptr(struct io_mapped_region *mr)
|
||||
{
|
||||
return mr->vmap_ptr;
|
||||
}
|
||||
|
||||
static inline bool io_region_is_set(struct io_mapped_region *mr)
|
||||
{
|
||||
return !!mr->nr_pages;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -89,8 +89,8 @@ static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
|
||||
int res, u32 cflags, u64 user_data)
|
||||
{
|
||||
req->task = READ_ONCE(ctx->submitter_task);
|
||||
if (!req->task) {
|
||||
req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
|
||||
if (!req->tctx) {
|
||||
kmem_cache_free(req_cachep, req);
|
||||
return -EOWNERDEAD;
|
||||
}
|
||||
@ -116,14 +116,13 @@ static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
|
||||
return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
|
||||
}
|
||||
|
||||
static int io_msg_data_remote(struct io_kiocb *req)
|
||||
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
|
||||
struct io_msg *msg)
|
||||
{
|
||||
struct io_ring_ctx *target_ctx = req->file->private_data;
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
struct io_kiocb *target;
|
||||
u32 flags = 0;
|
||||
|
||||
target = io_msg_get_kiocb(req->ctx);
|
||||
target = io_msg_get_kiocb(target_ctx);
|
||||
if (unlikely(!target))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -134,10 +133,9 @@ static int io_msg_data_remote(struct io_kiocb *req)
|
||||
msg->user_data);
|
||||
}
|
||||
|
||||
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
|
||||
static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
|
||||
struct io_msg *msg, unsigned int issue_flags)
|
||||
{
|
||||
struct io_ring_ctx *target_ctx = req->file->private_data;
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
u32 flags = 0;
|
||||
int ret;
|
||||
|
||||
@ -149,7 +147,7 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return -EBADFD;
|
||||
|
||||
if (io_msg_need_remote(target_ctx))
|
||||
return io_msg_data_remote(req);
|
||||
return io_msg_data_remote(target_ctx, msg);
|
||||
|
||||
if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
|
||||
flags = msg->cqe_flags;
|
||||
@ -166,22 +164,32 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
|
||||
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
struct io_ring_ctx *target_ctx = req->file->private_data;
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
|
||||
return __io_msg_ring_data(target_ctx, msg, issue_flags);
|
||||
}
|
||||
|
||||
static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct file *file = NULL;
|
||||
int idx = msg->src_fd;
|
||||
struct io_rsrc_node *node;
|
||||
int ret = -EBADF;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
if (likely(idx < ctx->nr_user_files)) {
|
||||
idx = array_index_nospec(idx, ctx->nr_user_files);
|
||||
file = io_file_from_index(&ctx->file_table, idx);
|
||||
if (file)
|
||||
get_file(file);
|
||||
node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
|
||||
if (node) {
|
||||
msg->src_file = io_slot_file(node);
|
||||
if (msg->src_file)
|
||||
get_file(msg->src_file);
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
ret = 0;
|
||||
}
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
return file;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
|
||||
@ -250,7 +258,6 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
|
||||
struct io_ring_ctx *target_ctx = req->file->private_data;
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct file *src_file = msg->src_file;
|
||||
|
||||
if (msg->len)
|
||||
return -EINVAL;
|
||||
@ -258,12 +265,10 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return -EINVAL;
|
||||
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
|
||||
return -EBADFD;
|
||||
if (!src_file) {
|
||||
src_file = io_msg_grab_file(req, issue_flags);
|
||||
if (!src_file)
|
||||
return -EBADF;
|
||||
msg->src_file = src_file;
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
if (!msg->src_file) {
|
||||
int ret = io_msg_grab_file(req, issue_flags);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (io_msg_need_remote(target_ctx))
|
||||
@ -271,10 +276,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return io_msg_install_complete(req, issue_flags);
|
||||
}
|
||||
|
||||
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
|
||||
{
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
|
||||
if (unlikely(sqe->buf_index || sqe->personality))
|
||||
return -EINVAL;
|
||||
|
||||
@ -291,6 +294,11 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
{
|
||||
return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
|
||||
}
|
||||
|
||||
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
|
||||
@ -322,6 +330,31 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return IOU_OK;
|
||||
}
|
||||
|
||||
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
{
	struct io_msg io_msg = { };
	int ret;

	ret = __io_msg_ring_prep(&io_msg, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Only data sending supported, not IORING_MSG_SEND_FD as that one
	 * doesn't make sense without a source ring to send files from.
	 */
	if (io_msg.cmd != IORING_MSG_DATA)
		return -EINVAL;

	CLASS(fd, f)(sqe->fd);
	if (fd_empty(f))
		return -EBADF;
	if (!io_is_uring_fops(fd_file(f)))
		return -EBADFD;
	return __io_msg_ring_data(fd_file(f)->private_data,
				  &io_msg, IO_URING_F_UNLOCKED);
}
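
A short sketch of how the sync path above might be reached from userspace. The SQE fields mirror __io_msg_ring_prep() (fd is the target ring, off carries the user_data to post, len the res, addr the IORING_MSG_DATA command), but the register opcode name and the convention of passing -1 as the ring fd are assumptions here, not something this hunk shows.

/*
 * Illustrative sketch only: post a CQE to another task's ring from a
 * thread that has no io_uring of its own.  The IORING_REGISTER_SEND_MSG_RING
 * opcode and the fd == -1 convention are assumed; the SQE layout follows
 * __io_msg_ring_prep() above.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int send_sync_msg(int target_ring_fd, __u64 user_data, __s32 res)
{
	struct io_uring_sqe sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = IORING_OP_MSG_RING;
	sqe.fd = target_ring_fd;	/* ring that receives the CQE */
	sqe.addr = IORING_MSG_DATA;	/* msg->cmd: plain data message */
	sqe.off = user_data;		/* user_data of the posted CQE */
	sqe.len = res;			/* res of the posted CQE */

	return syscall(__NR_io_uring_register, -1,
		       IORING_REGISTER_SEND_MSG_RING, &sqe, 1);
}
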
|
||||
|
||||
void io_msg_cache_free(const void *entry)
|
||||
{
|
||||
struct io_kiocb *req = (struct io_kiocb *) entry;
|
||||
|
@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe);
|
||||
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
||||
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
|
||||
void io_msg_ring_cleanup(struct io_kiocb *req);
|
||||
|
184 io_uring/napi.c
@ -38,67 +38,88 @@ static inline ktime_t net_to_ktime(unsigned long t)
|
||||
return ns_to_ktime(t << 10);
|
||||
}
|
||||
|
||||
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
|
||||
int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
|
||||
{
|
||||
struct hlist_head *hash_list;
|
||||
unsigned int napi_id;
|
||||
struct sock *sk;
|
||||
struct io_napi_entry *e;
|
||||
|
||||
sk = sock->sk;
|
||||
if (!sk)
|
||||
return;
|
||||
|
||||
napi_id = READ_ONCE(sk->sk_napi_id);
|
||||
|
||||
/* Non-NAPI IDs can be rejected. */
|
||||
if (napi_id < MIN_NAPI_ID)
|
||||
return;
|
||||
return -EINVAL;
|
||||
|
||||
hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
|
||||
|
||||
rcu_read_lock();
|
||||
e = io_napi_hash_find(hash_list, napi_id);
|
||||
if (e) {
|
||||
e->timeout = jiffies + NAPI_TIMEOUT;
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
scoped_guard(rcu) {
|
||||
e = io_napi_hash_find(hash_list, napi_id);
|
||||
if (e) {
|
||||
WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
e = kmalloc(sizeof(*e), GFP_NOWAIT);
|
||||
if (!e)
|
||||
return;
|
||||
return -ENOMEM;
|
||||
|
||||
e->napi_id = napi_id;
|
||||
e->timeout = jiffies + NAPI_TIMEOUT;
|
||||
|
||||
/*
|
||||
* guard(spinlock) is not used to manually unlock it before calling
|
||||
* kfree()
|
||||
*/
|
||||
spin_lock(&ctx->napi_lock);
|
||||
if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
|
||||
spin_unlock(&ctx->napi_lock);
|
||||
kfree(e);
|
||||
return;
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
hlist_add_tail_rcu(&e->node, hash_list);
|
||||
list_add_tail(&e->list, &ctx->napi_list);
|
||||
list_add_tail_rcu(&e->list, &ctx->napi_list);
|
||||
spin_unlock(&ctx->napi_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __io_napi_del_id(struct io_ring_ctx *ctx, unsigned int napi_id)
|
||||
{
|
||||
struct hlist_head *hash_list;
|
||||
struct io_napi_entry *e;
|
||||
|
||||
/* Non-NAPI IDs can be rejected. */
|
||||
if (napi_id < MIN_NAPI_ID)
|
||||
return -EINVAL;
|
||||
|
||||
hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
|
||||
guard(spinlock)(&ctx->napi_lock);
|
||||
e = io_napi_hash_find(hash_list, napi_id);
|
||||
if (!e)
|
||||
return -ENOENT;
|
||||
|
||||
list_del_rcu(&e->list);
|
||||
hash_del_rcu(&e->node);
|
||||
kfree_rcu(e, rcu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
|
||||
{
|
||||
struct io_napi_entry *e;
|
||||
unsigned int i;
|
||||
|
||||
spin_lock(&ctx->napi_lock);
|
||||
hash_for_each(ctx->napi_ht, i, e, node) {
|
||||
if (time_after(jiffies, e->timeout)) {
|
||||
list_del(&e->list);
|
||||
guard(spinlock)(&ctx->napi_lock);
|
||||
/*
|
||||
* list_for_each_entry_safe() is not required as long as:
|
||||
* 1. list_del_rcu() does not reset the deleted node next pointer
|
||||
* 2. kfree_rcu() delays the memory freeing until the next quiescent
|
||||
* state
|
||||
*/
|
||||
list_for_each_entry(e, &ctx->napi_list, list) {
|
||||
if (time_after(jiffies, READ_ONCE(e->timeout))) {
|
||||
list_del_rcu(&e->list);
|
||||
hash_del_rcu(&e->node);
|
||||
kfree_rcu(e, rcu);
|
||||
}
|
||||
}
|
||||
spin_unlock(&ctx->napi_lock);
|
||||
}
|
||||
|
||||
static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
|
||||
@ -136,45 +157,73 @@ static bool io_napi_busy_loop_should_end(void *data,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool __io_napi_do_busy_loop(struct io_ring_ctx *ctx,
|
||||
void *loop_end_arg)
|
||||
/*
|
||||
* never report stale entries
|
||||
*/
|
||||
static bool static_tracking_do_busy_loop(struct io_ring_ctx *ctx,
|
||||
bool (*loop_end)(void *, unsigned long),
|
||||
void *loop_end_arg)
|
||||
{
|
||||
struct io_napi_entry *e;
|
||||
bool (*loop_end)(void *, unsigned long) = NULL;
|
||||
bool is_stale = false;
|
||||
|
||||
if (loop_end_arg)
|
||||
loop_end = io_napi_busy_loop_should_end;
|
||||
list_for_each_entry_rcu(e, &ctx->napi_list, list)
|
||||
napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
|
||||
ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
dynamic_tracking_do_busy_loop(struct io_ring_ctx *ctx,
|
||||
bool (*loop_end)(void *, unsigned long),
|
||||
void *loop_end_arg)
|
||||
{
|
||||
struct io_napi_entry *e;
|
||||
bool is_stale = false;
|
||||
|
||||
list_for_each_entry_rcu(e, &ctx->napi_list, list) {
|
||||
napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
|
||||
ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
|
||||
|
||||
if (time_after(jiffies, e->timeout))
|
||||
if (time_after(jiffies, READ_ONCE(e->timeout)))
|
||||
is_stale = true;
|
||||
}
|
||||
|
||||
return is_stale;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
__io_napi_do_busy_loop(struct io_ring_ctx *ctx,
|
||||
bool (*loop_end)(void *, unsigned long),
|
||||
void *loop_end_arg)
|
||||
{
|
||||
if (READ_ONCE(ctx->napi_track_mode) == IO_URING_NAPI_TRACKING_STATIC)
|
||||
return static_tracking_do_busy_loop(ctx, loop_end, loop_end_arg);
|
||||
return dynamic_tracking_do_busy_loop(ctx, loop_end, loop_end_arg);
|
||||
}
|
||||
|
||||
static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
|
||||
struct io_wait_queue *iowq)
|
||||
{
|
||||
unsigned long start_time = busy_loop_current_time();
|
||||
bool (*loop_end)(void *, unsigned long) = NULL;
|
||||
void *loop_end_arg = NULL;
|
||||
bool is_stale = false;
|
||||
|
||||
/* Singular lists use a different napi loop end check function and are
|
||||
* only executed once.
|
||||
*/
|
||||
if (list_is_singular(&ctx->napi_list))
|
||||
if (list_is_singular(&ctx->napi_list)) {
|
||||
loop_end = io_napi_busy_loop_should_end;
|
||||
loop_end_arg = iowq;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
do {
|
||||
is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
|
||||
} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu) {
|
||||
do {
|
||||
is_stale = __io_napi_do_busy_loop(ctx, loop_end,
|
||||
loop_end_arg);
|
||||
} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
|
||||
!loop_end_arg);
|
||||
}
|
||||
|
||||
io_napi_remove_stale(ctx, is_stale);
|
||||
}
|
||||
@ -193,6 +242,7 @@ void io_napi_init(struct io_ring_ctx *ctx)
|
||||
spin_lock_init(&ctx->napi_lock);
|
||||
ctx->napi_prefer_busy_poll = false;
|
||||
ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt);
|
||||
ctx->napi_track_mode = IO_URING_NAPI_TRACKING_INACTIVE;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -204,14 +254,31 @@ void io_napi_init(struct io_ring_ctx *ctx)
|
||||
void io_napi_free(struct io_ring_ctx *ctx)
|
||||
{
|
||||
struct io_napi_entry *e;
|
||||
unsigned int i;
|
||||
|
||||
spin_lock(&ctx->napi_lock);
|
||||
hash_for_each(ctx->napi_ht, i, e, node) {
|
||||
guard(spinlock)(&ctx->napi_lock);
|
||||
list_for_each_entry(e, &ctx->napi_list, list) {
|
||||
hash_del_rcu(&e->node);
|
||||
kfree_rcu(e, rcu);
|
||||
}
|
||||
spin_unlock(&ctx->napi_lock);
|
||||
INIT_LIST_HEAD_RCU(&ctx->napi_list);
|
||||
}
|
||||
|
||||
static int io_napi_register_napi(struct io_ring_ctx *ctx,
				 struct io_uring_napi *napi)
{
	switch (napi->op_param) {
	case IO_URING_NAPI_TRACKING_DYNAMIC:
	case IO_URING_NAPI_TRACKING_STATIC:
		break;
	default:
		return -EINVAL;
	}
	/* clean the napi list for new settings */
	io_napi_free(ctx);
	WRITE_ONCE(ctx->napi_track_mode, napi->op_param);
	WRITE_ONCE(ctx->napi_busy_poll_dt, napi->busy_poll_to * NSEC_PER_USEC);
	WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi->prefer_busy_poll);
	return 0;
}

/*
@@ -225,7 +292,8 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
{
	const struct io_uring_napi curr = {
		.busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
		.prefer_busy_poll = ctx->napi_prefer_busy_poll
		.prefer_busy_poll = ctx->napi_prefer_busy_poll,
		.op_param = ctx->napi_track_mode
	};
	struct io_uring_napi napi;

@@ -233,16 +301,26 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
		return -EINVAL;
	if (copy_from_user(&napi, arg, sizeof(napi)))
		return -EFAULT;
	if (napi.pad[0] || napi.pad[1] || napi.pad[2] || napi.resv)
	if (napi.pad[0] || napi.pad[1] || napi.resv)
		return -EINVAL;

	if (copy_to_user(arg, &curr, sizeof(curr)))
		return -EFAULT;

	WRITE_ONCE(ctx->napi_busy_poll_dt, napi.busy_poll_to * NSEC_PER_USEC);
	WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
	WRITE_ONCE(ctx->napi_enabled, true);
	return 0;
	switch (napi.opcode) {
	case IO_URING_NAPI_REGISTER_OP:
		return io_napi_register_napi(ctx, &napi);
	case IO_URING_NAPI_STATIC_ADD_ID:
		if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC)
			return -EINVAL;
		return __io_napi_add_id(ctx, napi.op_param);
	case IO_URING_NAPI_STATIC_DEL_ID:
		if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC)
			return -EINVAL;
		return __io_napi_del_id(ctx, napi.op_param);
	default:
		return -EINVAL;
	}
}
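
To illustrate the registration flow above, here is a hedged userspace sketch: it switches a ring to static NAPI tracking and then pins one NAPI id explicitly. The field and constant names follow the code above and IORING_REGISTER_NAPI is the pre-existing registration opcode; the raw syscall is used instead of a library helper to keep the assumptions minimal, and the helper itself is made up for illustration.

/*
 * Illustrative sketch only: enable static NAPI tracking, then add one
 * NAPI id.  op_param carries the tracking mode for REGISTER_OP and the
 * NAPI id for STATIC_ADD_ID, mirroring io_register_napi() above.
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int napi_static_track(int ring_fd, unsigned int napi_id)
{
	struct io_uring_napi napi;
	int ret;

	/* 1) enable NAPI busy polling with static tracking */
	memset(&napi, 0, sizeof(napi));
	napi.opcode = IO_URING_NAPI_REGISTER_OP;
	napi.op_param = IO_URING_NAPI_TRACKING_STATIC;
	napi.busy_poll_to = 50;		/* usec, per the NSEC_PER_USEC scaling */
	napi.prefer_busy_poll = 1;
	ret = syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_NAPI,
		      &napi, 1);
	if (ret < 0)
		return ret;

	/* 2) add one NAPI id explicitly; only valid in static tracking mode */
	memset(&napi, 0, sizeof(napi));
	napi.opcode = IO_URING_NAPI_STATIC_ADD_ID;
	napi.op_param = napi_id;
	return syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_NAPI,
		       &napi, 1);
}
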
|
||||
|
||||
/*
|
||||
@ -265,7 +343,7 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
|
||||
|
||||
WRITE_ONCE(ctx->napi_busy_poll_dt, 0);
|
||||
WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
|
||||
WRITE_ONCE(ctx->napi_enabled, false);
|
||||
WRITE_ONCE(ctx->napi_track_mode, IO_URING_NAPI_TRACKING_INACTIVE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -307,9 +385,9 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
|
||||
if (list_empty_careful(&ctx->napi_list))
|
||||
return 0;
|
||||
|
||||
rcu_read_lock();
|
||||
is_stale = __io_napi_do_busy_loop(ctx, NULL);
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu) {
|
||||
is_stale = __io_napi_do_busy_loop(ctx, NULL, NULL);
|
||||
}
|
||||
|
||||
io_napi_remove_stale(ctx, is_stale);
|
||||
return 1;
|
||||
|
@ -15,7 +15,7 @@ void io_napi_free(struct io_ring_ctx *ctx);
|
||||
int io_register_napi(struct io_ring_ctx *ctx, void __user *arg);
|
||||
int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
|
||||
|
||||
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
|
||||
int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id);
|
||||
|
||||
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
|
||||
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
|
||||
@ -44,12 +44,12 @@ static inline void io_napi_add(struct io_kiocb *req)
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct socket *sock;
|
||||
|
||||
if (!READ_ONCE(ctx->napi_enabled))
|
||||
if (READ_ONCE(ctx->napi_track_mode) != IO_URING_NAPI_TRACKING_DYNAMIC)
|
||||
return;
|
||||
|
||||
sock = sock_from_file(req->file);
|
||||
if (sock)
|
||||
__io_napi_add(ctx, sock);
|
||||
if (sock && sock->sk)
|
||||
__io_napi_add_id(ctx, READ_ONCE(sock->sk->sk_napi_id));
|
||||
}
|
||||
|
||||
#else
|
||||
|
112 io_uring/net.c
@ -74,9 +74,8 @@ struct io_sr_msg {
|
||||
unsigned nr_multishot_loops;
|
||||
u16 flags;
|
||||
/* initialised and used only by !msg send variants */
|
||||
u16 addr_len;
|
||||
u16 buf_group;
|
||||
void __user *addr;
|
||||
u16 buf_index;
|
||||
void __user *msg_control;
|
||||
/* used only for send zerocopy */
|
||||
struct io_kiocb *notif;
|
||||
@ -263,6 +262,7 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
|
||||
struct user_msghdr *msg, int ddir)
|
||||
{
|
||||
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
||||
struct user_msghdr __user *umsg = sr->umsg;
|
||||
struct iovec *iov;
|
||||
int ret, nr_segs;
|
||||
|
||||
@ -274,16 +274,16 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
|
||||
nr_segs = 1;
|
||||
}
|
||||
|
||||
if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
|
||||
if (!user_access_begin(umsg, sizeof(*umsg)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = -EFAULT;
|
||||
unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
|
||||
unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
|
||||
unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
|
||||
unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
|
||||
unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
|
||||
unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
|
||||
unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
|
||||
unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
|
||||
unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
|
||||
unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
|
||||
unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
|
||||
unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
|
||||
msg->msg_flags = 0;
|
||||
|
||||
if (req->flags & REQ_F_BUFFER_SELECT) {
|
||||
@ -356,24 +356,33 @@ void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
|
||||
io_netmsg_iovec_free(io);
|
||||
}
|
||||
|
||||
static int io_send_setup(struct io_kiocb *req)
|
||||
static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
{
|
||||
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
||||
struct io_async_msghdr *kmsg = req->async_data;
|
||||
void __user *addr;
|
||||
u16 addr_len;
|
||||
int ret;
|
||||
|
||||
sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
||||
|
||||
if (READ_ONCE(sqe->__pad3[0]))
|
||||
return -EINVAL;
|
||||
|
||||
kmsg->msg.msg_name = NULL;
|
||||
kmsg->msg.msg_namelen = 0;
|
||||
kmsg->msg.msg_control = NULL;
|
||||
kmsg->msg.msg_controllen = 0;
|
||||
kmsg->msg.msg_ubuf = NULL;
|
||||
|
||||
if (sr->addr) {
|
||||
ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
|
||||
addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
|
||||
addr_len = READ_ONCE(sqe->addr_len);
|
||||
if (addr) {
|
||||
ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
|
||||
if (unlikely(ret < 0))
|
||||
return ret;
|
||||
kmsg->msg.msg_name = &kmsg->addr;
|
||||
kmsg->msg.msg_namelen = sr->addr_len;
|
||||
kmsg->msg.msg_namelen = addr_len;
|
||||
}
|
||||
if (!io_do_buffer_select(req)) {
|
||||
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
|
||||
@ -384,16 +393,14 @@ static int io_send_setup(struct io_kiocb *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
|
||||
static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
{
|
||||
struct io_async_msghdr *kmsg;
|
||||
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
||||
struct io_async_msghdr *kmsg = req->async_data;
|
||||
int ret;
|
||||
|
||||
kmsg = io_msg_alloc_async(req);
|
||||
if (unlikely(!kmsg))
|
||||
return -ENOMEM;
|
||||
if (!is_msg)
|
||||
return io_send_setup(req);
|
||||
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
||||
|
||||
ret = io_sendmsg_copy_hdr(req, kmsg);
|
||||
if (!ret)
|
||||
req->flags |= REQ_F_NEED_CLEANUP;
|
||||
@ -408,16 +415,11 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
|
||||
sr->done_io = 0;
|
||||
|
||||
if (req->opcode == IORING_OP_SEND) {
|
||||
if (READ_ONCE(sqe->__pad3[0]))
|
||||
if (req->opcode != IORING_OP_SEND) {
|
||||
if (sqe->addr2 || sqe->file_index)
|
||||
return -EINVAL;
|
||||
sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
|
||||
sr->addr_len = READ_ONCE(sqe->addr_len);
|
||||
} else if (sqe->addr2 || sqe->file_index) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
||||
sr->len = READ_ONCE(sqe->len);
|
||||
sr->flags = READ_ONCE(sqe->ioprio);
|
||||
if (sr->flags & ~SENDMSG_FLAGS)
|
||||
@ -439,7 +441,11 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
if (req->ctx->compat)
|
||||
sr->msg_flags |= MSG_CMSG_COMPAT;
|
||||
#endif
|
||||
return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
|
||||
if (unlikely(!io_msg_alloc_async(req)))
|
||||
return -ENOMEM;
|
||||
if (req->opcode != IORING_OP_SENDMSG)
|
||||
return io_send_setup(req, sqe);
|
||||
return io_sendmsg_setup(req, sqe);
|
||||
}
|
||||
|
||||
static void io_req_msg_cleanup(struct io_kiocb *req,
|
||||
@ -1254,31 +1260,16 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
}
|
||||
}
|
||||
|
||||
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
|
||||
unsigned idx = READ_ONCE(sqe->buf_index);
|
||||
|
||||
if (unlikely(idx >= ctx->nr_user_bufs))
|
||||
return -EFAULT;
|
||||
idx = array_index_nospec(idx, ctx->nr_user_bufs);
|
||||
req->imu = READ_ONCE(ctx->user_bufs[idx]);
|
||||
io_req_set_rsrc_node(notif, ctx, 0);
|
||||
}
|
||||
|
||||
if (req->opcode == IORING_OP_SEND_ZC) {
|
||||
if (READ_ONCE(sqe->__pad3[0]))
|
||||
return -EINVAL;
|
||||
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
|
||||
zc->addr_len = READ_ONCE(sqe->addr_len);
|
||||
} else {
|
||||
if (req->opcode != IORING_OP_SEND_ZC) {
|
||||
if (unlikely(sqe->addr2 || sqe->file_index))
|
||||
return -EINVAL;
|
||||
if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
|
||||
zc->len = READ_ONCE(sqe->len);
|
||||
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
|
||||
zc->buf_index = READ_ONCE(sqe->buf_index);
|
||||
if (zc->msg_flags & MSG_DONTWAIT)
|
||||
req->flags |= REQ_F_NOWAIT;
|
||||
|
||||
@ -1286,7 +1277,11 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
if (req->ctx->compat)
|
||||
zc->msg_flags |= MSG_CMSG_COMPAT;
|
||||
#endif
|
||||
return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
|
||||
if (unlikely(!io_msg_alloc_async(req)))
|
||||
return -ENOMEM;
|
||||
if (req->opcode != IORING_OP_SENDMSG_ZC)
|
||||
return io_send_setup(req, sqe);
|
||||
return io_sendmsg_setup(req, sqe);
|
||||
}
|
||||
|
||||
static int io_sg_from_iter_iovec(struct sk_buff *skb,
|
||||
@ -1339,14 +1334,31 @@ static int io_sg_from_iter(struct sk_buff *skb,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
|
||||
static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
|
||||
struct io_async_msghdr *kmsg = req->async_data;
|
||||
int ret;
|
||||
|
||||
if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
|
||||
ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu,
|
||||
(u64)(uintptr_t)sr->buf, sr->len);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
ret = -EFAULT;
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
|
||||
if (node) {
|
||||
io_req_assign_buf_node(sr->notif, node);
|
||||
ret = 0;
|
||||
}
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
|
||||
node->buf, (u64)(uintptr_t)sr->buf,
|
||||
sr->len);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
kmsg->msg.sg_from_iter = io_sg_from_iter;
|
||||
@ -1382,7 +1394,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return -EAGAIN;
|
||||
|
||||
if (!zc->done_io) {
|
||||
ret = io_send_zc_import(req, kmsg);
|
||||
ret = io_send_zc_import(req, issue_flags);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
}
|
||||
|
@@ -8,35 +8,72 @@
#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "nop.h"

struct io_nop {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct file *file;
	int result;
	int fd;
	int buffer;
	unsigned int flags;
};

#define NOP_FLAGS (IORING_NOP_INJECT_RESULT | IORING_NOP_FIXED_FILE | \
		   IORING_NOP_FIXED_BUFFER | IORING_NOP_FILE)

int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned int flags;
	struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);

	flags = READ_ONCE(sqe->nop_flags);
	if (flags & ~IORING_NOP_INJECT_RESULT)
	nop->flags = READ_ONCE(sqe->nop_flags);
	if (nop->flags & ~NOP_FLAGS)
		return -EINVAL;

	if (flags & IORING_NOP_INJECT_RESULT)
	if (nop->flags & IORING_NOP_INJECT_RESULT)
		nop->result = READ_ONCE(sqe->len);
	else
		nop->result = 0;
	if (nop->flags & IORING_NOP_FIXED_FILE)
		nop->fd = READ_ONCE(sqe->fd);
	if (nop->flags & IORING_NOP_FIXED_BUFFER)
		nop->buffer = READ_ONCE(sqe->buf_index);
	return 0;
}

int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);
	int ret = nop->result;

	if (nop->result < 0)
	if (nop->flags & IORING_NOP_FILE) {
		if (nop->flags & IORING_NOP_FIXED_FILE) {
			req->file = io_file_get_fixed(req, nop->fd, issue_flags);
			req->flags |= REQ_F_FIXED_FILE;
		} else {
			req->file = io_file_get_normal(req, nop->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
	}
	if (nop->flags & IORING_NOP_FIXED_BUFFER) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;

		ret = -EFAULT;
		io_ring_submit_lock(ctx, issue_flags);
		node = io_rsrc_node_lookup(&ctx->buf_table, nop->buffer);
		if (node) {
			io_req_assign_buf_node(req, node);
			ret = 0;
		}
		io_ring_submit_unlock(ctx, issue_flags);
	}
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, nop->result, 0);
	return IOU_OK;
|
||||
|
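The extended NOP above is driven purely by SQE fields: sqe->nop_flags selects the behaviour, sqe->len carries the injected result, sqe->fd names the fixed file and sqe->buf_index the fixed buffer. A hedged userspace sketch, assuming liburing plus a uapi header that already defines the new IORING_NOP_* flags, and assuming index 0 of both the fixed file and fixed buffer tables has been registered:

/* Sketch only: exercise the extended NOP flags added above.
 * Assumes liburing and a uapi header defining IORING_NOP_FILE etc.,
 * and that fixed file/buffer index 0 has already been registered.
 */
#include <errno.h>
#include <liburing.h>

static int submit_extended_nop(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EBUSY;

        io_uring_prep_nop(sqe);
        sqe->nop_flags = IORING_NOP_INJECT_RESULT | IORING_NOP_FILE |
                         IORING_NOP_FIXED_FILE | IORING_NOP_FIXED_BUFFER;
        sqe->len = 42;          /* injected as cqe->res */
        sqe->fd = 0;            /* index into the fixed file table */
        sqe->buf_index = 0;     /* index into the fixed buffer table */

        return io_uring_submit(ring);
}

With all four flags set, a single NOP exercises fixed-file lookup, fixed-buffer node assignment and result injection at once, which is what makes it handy as a test vehicle.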
@ -89,7 +89,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
|
||||
|
||||
/* make sure all notifications can be finished in the same task_work */
|
||||
if (unlikely(notif->ctx != prev_notif->ctx ||
|
||||
notif->task != prev_notif->task))
|
||||
notif->tctx != prev_notif->tctx))
|
||||
return -EEXIST;
|
||||
|
||||
nd->head = prev_nd->head;
|
||||
@ -115,9 +115,10 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
|
||||
notif->opcode = IORING_OP_NOP;
|
||||
notif->flags = 0;
|
||||
notif->file = NULL;
|
||||
notif->task = current;
|
||||
notif->tctx = current->io_uring;
|
||||
io_get_task_refs(1);
|
||||
notif->rsrc_node = NULL;
|
||||
notif->file_node = NULL;
|
||||
notif->buf_node = NULL;
|
||||
|
||||
nd = io_notif_to_data(notif);
|
||||
nd->zc_report = false;
|
||||
|
@ -641,6 +641,7 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
},
|
||||
[IORING_OP_SPLICE] = {
|
||||
.name = "SPLICE",
|
||||
.cleanup = io_splice_cleanup,
|
||||
},
|
||||
[IORING_OP_PROVIDE_BUFFERS] = {
|
||||
.name = "PROVIDE_BUFFERS",
|
||||
@ -650,6 +651,7 @@ const struct io_cold_def io_cold_defs[] = {
|
||||
},
|
||||
[IORING_OP_TEE] = {
|
||||
.name = "TEE",
|
||||
.cleanup = io_splice_cleanup,
|
||||
},
|
||||
[IORING_OP_SHUTDOWN] = {
|
||||
.name = "SHUTDOWN",
|
||||
|
183  io_uring/poll.c
@ -122,53 +122,12 @@ static void io_poll_req_insert(struct io_kiocb *req)
|
||||
{
|
||||
struct io_hash_table *table = &req->ctx->cancel_table;
|
||||
u32 index = hash_long(req->cqe.user_data, table->hash_bits);
|
||||
struct io_hash_bucket *hb = &table->hbs[index];
|
||||
|
||||
spin_lock(&hb->lock);
|
||||
hlist_add_head(&req->hash_node, &hb->list);
|
||||
spin_unlock(&hb->lock);
|
||||
}
|
||||
|
||||
static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
|
||||
{
|
||||
struct io_hash_table *table = &req->ctx->cancel_table;
|
||||
u32 index = hash_long(req->cqe.user_data, table->hash_bits);
|
||||
spinlock_t *lock = &table->hbs[index].lock;
|
||||
|
||||
spin_lock(lock);
|
||||
hash_del(&req->hash_node);
|
||||
spin_unlock(lock);
|
||||
}
|
||||
|
||||
static void io_poll_req_insert_locked(struct io_kiocb *req)
|
||||
{
|
||||
struct io_hash_table *table = &req->ctx->cancel_table_locked;
|
||||
u32 index = hash_long(req->cqe.user_data, table->hash_bits);
|
||||
|
||||
lockdep_assert_held(&req->ctx->uring_lock);
|
||||
|
||||
hlist_add_head(&req->hash_node, &table->hbs[index].list);
|
||||
}
|
||||
|
||||
static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
{
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
if (req->flags & REQ_F_HASH_LOCKED) {
|
||||
/*
|
||||
* ->cancel_table_locked is protected by ->uring_lock in
|
||||
* contrast to per bucket spinlocks. Likely, tctx_task_work()
|
||||
* already grabbed the mutex for us, but there is a chance it
|
||||
* failed.
|
||||
*/
|
||||
io_tw_lock(ctx, ts);
|
||||
hash_del(&req->hash_node);
|
||||
req->flags &= ~REQ_F_HASH_LOCKED;
|
||||
} else {
|
||||
io_poll_req_delete(req, ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
|
||||
{
|
||||
poll->head = NULL;
|
||||
@ -265,8 +224,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
{
|
||||
int v;
|
||||
|
||||
/* req->task == current here, checking PF_EXITING is safe */
|
||||
if (unlikely(req->task->flags & PF_EXITING))
|
||||
if (unlikely(io_should_terminate_tw()))
|
||||
return -ECANCELED;
|
||||
|
||||
do {
|
||||
@ -363,7 +321,8 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
|
||||
return;
|
||||
}
|
||||
io_poll_remove_entries(req);
|
||||
io_poll_tw_hash_eject(req, ts);
|
||||
/* task_work always has ->uring_lock held */
|
||||
hash_del(&req->hash_node);
|
||||
|
||||
if (req->opcode == IORING_OP_POLL_ADD) {
|
||||
if (ret == IOU_POLL_DONE) {
|
||||
@ -563,12 +522,13 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
|
||||
return pt->owning || io_poll_get_ownership(req);
|
||||
}
|
||||
|
||||
static void io_poll_add_hash(struct io_kiocb *req)
|
||||
static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
|
||||
{
|
||||
if (req->flags & REQ_F_HASH_LOCKED)
|
||||
io_poll_req_insert_locked(req);
|
||||
else
|
||||
io_poll_req_insert(req);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
io_poll_req_insert(req);
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -605,11 +565,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
|
||||
ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
|
||||
atomic_set(&req->poll_refs, (int)ipt->owning);
|
||||
|
||||
/* io-wq doesn't hold uring_lock */
|
||||
if (issue_flags & IO_URING_F_UNLOCKED)
|
||||
req->flags &= ~REQ_F_HASH_LOCKED;
|
||||
|
||||
|
||||
/*
|
||||
* Exclusive waits may only wake a limited amount of entries
|
||||
* rather than all of them, this may interfere with lazy
|
||||
@ -638,7 +593,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
|
||||
if (mask &&
|
||||
((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
|
||||
if (!io_poll_can_finish_inline(req, ipt)) {
|
||||
io_poll_add_hash(req);
|
||||
io_poll_add_hash(req, issue_flags);
|
||||
return 0;
|
||||
}
|
||||
io_poll_remove_entries(req);
|
||||
@ -647,7 +602,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
|
||||
return 1;
|
||||
}
|
||||
|
||||
io_poll_add_hash(req);
|
||||
io_poll_add_hash(req, issue_flags);
|
||||
|
||||
if (mask && (poll->events & EPOLLET) &&
|
||||
io_poll_can_finish_inline(req, ipt)) {
|
||||
@ -720,12 +675,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
|
||||
__poll_t mask = POLLPRI | POLLERR | EPOLLET;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* apoll requests already grab the mutex to complete in the tw handler,
|
||||
* so removal from the mutex-backed hash is free, use it by default.
|
||||
*/
|
||||
req->flags |= REQ_F_HASH_LOCKED;
|
||||
|
||||
if (!def->pollin && !def->pollout)
|
||||
return IO_APOLL_ABORTED;
|
||||
if (!io_file_can_poll(req))
|
||||
@ -761,58 +710,41 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
|
||||
return IO_APOLL_OK;
|
||||
}
|
||||
|
||||
static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
|
||||
struct io_hash_table *table,
|
||||
bool cancel_all)
|
||||
/*
|
||||
* Returns true if we found and killed one or more poll requests
|
||||
*/
|
||||
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
|
||||
bool cancel_all)
|
||||
{
|
||||
unsigned nr_buckets = 1U << table->hash_bits;
|
||||
unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
|
||||
struct hlist_node *tmp;
|
||||
struct io_kiocb *req;
|
||||
bool found = false;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_buckets; i++) {
|
||||
struct io_hash_bucket *hb = &table->hbs[i];
|
||||
lockdep_assert_held(&ctx->uring_lock);
|
||||
|
||||
for (i = 0; i < nr_buckets; i++) {
|
||||
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
|
||||
|
||||
spin_lock(&hb->lock);
|
||||
hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
|
||||
if (io_match_task_safe(req, tsk, cancel_all)) {
|
||||
if (io_match_task_safe(req, tctx, cancel_all)) {
|
||||
hlist_del_init(&req->hash_node);
|
||||
io_poll_cancel_req(req);
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
spin_unlock(&hb->lock);
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if we found and killed one or more poll requests
|
||||
*/
|
||||
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
|
||||
bool cancel_all)
|
||||
__must_hold(&ctx->uring_lock)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
|
||||
ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
|
||||
struct io_cancel_data *cd,
|
||||
struct io_hash_table *table,
|
||||
struct io_hash_bucket **out_bucket)
|
||||
struct io_cancel_data *cd)
|
||||
{
|
||||
struct io_kiocb *req;
|
||||
u32 index = hash_long(cd->data, table->hash_bits);
|
||||
struct io_hash_bucket *hb = &table->hbs[index];
|
||||
u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
|
||||
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];
|
||||
|
||||
*out_bucket = NULL;
|
||||
|
||||
spin_lock(&hb->lock);
|
||||
hlist_for_each_entry(req, &hb->list, hash_node) {
|
||||
if (cd->data != req->cqe.user_data)
|
||||
continue;
|
||||
@ -822,35 +754,25 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
|
||||
if (io_cancel_match_sequence(req, cd->seq))
|
||||
continue;
|
||||
}
|
||||
*out_bucket = hb;
|
||||
return req;
|
||||
}
|
||||
spin_unlock(&hb->lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
|
||||
struct io_cancel_data *cd,
|
||||
struct io_hash_table *table,
|
||||
struct io_hash_bucket **out_bucket)
|
||||
struct io_cancel_data *cd)
|
||||
{
|
||||
unsigned nr_buckets = 1U << table->hash_bits;
|
||||
unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
|
||||
struct io_kiocb *req;
|
||||
int i;
|
||||
|
||||
*out_bucket = NULL;
|
||||
|
||||
for (i = 0; i < nr_buckets; i++) {
|
||||
struct io_hash_bucket *hb = &table->hbs[i];
|
||||
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
|
||||
|
||||
spin_lock(&hb->lock);
|
||||
hlist_for_each_entry(req, &hb->list, hash_node) {
|
||||
if (io_cancel_req_match(req, cd)) {
|
||||
*out_bucket = hb;
|
||||
if (io_cancel_req_match(req, cd))
|
||||
return req;
|
||||
}
|
||||
}
|
||||
spin_unlock(&hb->lock);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -866,23 +788,21 @@ static int io_poll_disarm(struct io_kiocb *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
|
||||
struct io_hash_table *table)
|
||||
static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
|
||||
{
|
||||
struct io_hash_bucket *bucket;
|
||||
struct io_kiocb *req;
|
||||
|
||||
if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
|
||||
IORING_ASYNC_CANCEL_ANY))
|
||||
req = io_poll_file_find(ctx, cd, table, &bucket);
|
||||
req = io_poll_file_find(ctx, cd);
|
||||
else
|
||||
req = io_poll_find(ctx, false, cd, table, &bucket);
|
||||
req = io_poll_find(ctx, false, cd);
|
||||
|
||||
if (req)
|
||||
if (req) {
|
||||
io_poll_cancel_req(req);
|
||||
if (bucket)
|
||||
spin_unlock(&bucket->lock);
|
||||
return req ? 0 : -ENOENT;
|
||||
return 0;
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
|
||||
@ -890,12 +810,8 @@ int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
|
||||
if (ret != -ENOENT)
|
||||
return ret;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
|
||||
ret = __io_poll_cancel(ctx, cd);
|
||||
io_ring_submit_unlock(ctx, issue_flags);
|
||||
return ret;
|
||||
}
|
||||
@ -972,13 +888,6 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
|
||||
|
||||
ipt.pt._qproc = io_poll_queue_proc;
|
||||
|
||||
/*
|
||||
* If sqpoll or single issuer, there is no contention for ->uring_lock
|
||||
* and we'll end up holding it in tw handlers anyway.
|
||||
*/
|
||||
if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
|
||||
req->flags |= REQ_F_HASH_LOCKED;
|
||||
|
||||
ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
|
||||
if (ret > 0) {
|
||||
io_req_set_res(req, ipt.result_mask, 0);
|
||||
@ -992,32 +901,16 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
|
||||
struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
|
||||
struct io_hash_bucket *bucket;
|
||||
struct io_kiocb *preq;
|
||||
int ret2, ret = 0;
|
||||
|
||||
io_ring_submit_lock(ctx, issue_flags);
|
||||
preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
|
||||
preq = io_poll_find(ctx, true, &cd);
|
||||
ret2 = io_poll_disarm(preq);
|
||||
if (bucket)
|
||||
spin_unlock(&bucket->lock);
|
||||
if (!ret2)
|
||||
goto found;
|
||||
if (ret2 != -ENOENT) {
|
||||
ret = ret2;
|
||||
goto out;
|
||||
}
|
||||
|
||||
preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
|
||||
ret2 = io_poll_disarm(preq);
|
||||
if (bucket)
|
||||
spin_unlock(&bucket->lock);
|
||||
if (ret2) {
|
||||
ret = ret2;
|
||||
goto out;
|
||||
}
|
||||
|
||||
found:
|
||||
if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
|
@ -40,7 +40,7 @@ struct io_cancel_data;
|
||||
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
|
||||
unsigned issue_flags);
|
||||
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
|
||||
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
|
||||
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
|
||||
bool cancel_all);
|
||||
|
||||
void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);
|
||||
|
@ -28,6 +28,8 @@
|
||||
#include "kbuf.h"
|
||||
#include "napi.h"
|
||||
#include "eventfd.h"
|
||||
#include "msg_ring.h"
|
||||
#include "memmap.h"
|
||||
|
||||
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
|
||||
IORING_REGISTER_LAST + IORING_OP_LAST)
|
||||
@@ -360,6 +362,259 @@ static int io_register_clock(struct io_ring_ctx *ctx,
return 0;
}

/*
* State to maintain until we can swap. Both new and old state, used for
* either mapping or freeing.
*/
struct io_ring_ctx_rings {
unsigned short n_ring_pages;
unsigned short n_sqe_pages;
struct page **ring_pages;
struct page **sqe_pages;
struct io_uring_sqe *sq_sqes;
struct io_rings *rings;
};

static void io_register_free_rings(struct io_uring_params *p,
struct io_ring_ctx_rings *r)
{
if (!(p->flags & IORING_SETUP_NO_MMAP)) {
io_pages_unmap(r->rings, &r->ring_pages, &r->n_ring_pages,
true);
io_pages_unmap(r->sq_sqes, &r->sqe_pages, &r->n_sqe_pages,
true);
} else {
io_pages_free(&r->ring_pages, r->n_ring_pages);
io_pages_free(&r->sqe_pages, r->n_sqe_pages);
vunmap(r->rings);
vunmap(r->sq_sqes);
}
}

#define swap_old(ctx, o, n, field) \
do { \
(o).field = (ctx)->field; \
(ctx)->field = (n).field; \
} while (0)

#define RESIZE_FLAGS (IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP)
#define COPY_FLAGS (IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQE128 | \
IORING_SETUP_CQE32 | IORING_SETUP_NO_MMAP)

static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
|
||||
{
|
||||
struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
|
||||
size_t size, sq_array_offset;
|
||||
struct io_uring_params p;
|
||||
unsigned i, tail;
|
||||
void *ptr;
|
||||
int ret;
|
||||
|
||||
/* for single issuer, must be owner resizing */
|
||||
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER &&
|
||||
current != ctx->submitter_task)
|
||||
return -EEXIST;
|
||||
if (copy_from_user(&p, arg, sizeof(p)))
|
||||
return -EFAULT;
|
||||
if (p.flags & ~RESIZE_FLAGS)
|
||||
return -EINVAL;
|
||||
|
||||
/* properties that are always inherited */
|
||||
p.flags |= (ctx->flags & COPY_FLAGS);
|
||||
|
||||
ret = io_uring_fill_params(p.sq_entries, &p);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
/* nothing to do, but copy params back */
|
||||
if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {
|
||||
if (copy_to_user(arg, &p, sizeof(p)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
size = rings_size(p.flags, p.sq_entries, p.cq_entries,
|
||||
&sq_array_offset);
|
||||
if (size == SIZE_MAX)
|
||||
return -EOVERFLOW;
|
||||
|
||||
if (!(p.flags & IORING_SETUP_NO_MMAP))
|
||||
n.rings = io_pages_map(&n.ring_pages, &n.n_ring_pages, size);
|
||||
else
|
||||
n.rings = __io_uaddr_map(&n.ring_pages, &n.n_ring_pages,
|
||||
p.cq_off.user_addr, size);
|
||||
if (IS_ERR(n.rings))
|
||||
return PTR_ERR(n.rings);
|
||||
|
||||
n.rings->sq_ring_mask = p.sq_entries - 1;
|
||||
n.rings->cq_ring_mask = p.cq_entries - 1;
|
||||
n.rings->sq_ring_entries = p.sq_entries;
|
||||
n.rings->cq_ring_entries = p.cq_entries;
|
||||
|
||||
if (copy_to_user(arg, &p, sizeof(p))) {
|
||||
io_register_free_rings(&p, &n);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (p.flags & IORING_SETUP_SQE128)
|
||||
size = array_size(2 * sizeof(struct io_uring_sqe), p.sq_entries);
|
||||
else
|
||||
size = array_size(sizeof(struct io_uring_sqe), p.sq_entries);
|
||||
if (size == SIZE_MAX) {
|
||||
io_register_free_rings(&p, &n);
|
||||
return -EOVERFLOW;
|
||||
}
|
||||
|
||||
if (!(p.flags & IORING_SETUP_NO_MMAP))
|
||||
ptr = io_pages_map(&n.sqe_pages, &n.n_sqe_pages, size);
|
||||
else
|
||||
ptr = __io_uaddr_map(&n.sqe_pages, &n.n_sqe_pages,
|
||||
p.sq_off.user_addr,
|
||||
size);
|
||||
if (IS_ERR(ptr)) {
|
||||
io_register_free_rings(&p, &n);
|
||||
return PTR_ERR(ptr);
|
||||
}
|
||||
|
||||
/*
|
||||
* If using SQPOLL, park the thread
|
||||
*/
|
||||
if (ctx->sq_data) {
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
io_sq_thread_park(ctx->sq_data);
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* We'll do the swap. Grab the ctx->resize_lock, which will exclude
|
||||
* any new mmap's on the ring fd. Clear out existing mappings to prevent
|
||||
* mmap from seeing them, as we'll unmap them. Any attempt to mmap
|
||||
* existing rings beyond this point will fail. Not that it could proceed
|
||||
* at this point anyway, as the io_uring mmap side needs to grab the
|
||||
* ctx->resize_lock as well. Likewise, hold the completion lock over the
|
||||
* duration of the actual swap.
|
||||
*/
|
||||
mutex_lock(&ctx->resize_lock);
|
||||
spin_lock(&ctx->completion_lock);
|
||||
o.rings = ctx->rings;
|
||||
ctx->rings = NULL;
|
||||
o.sq_sqes = ctx->sq_sqes;
|
||||
ctx->sq_sqes = NULL;
|
||||
|
||||
/*
|
||||
* Now copy SQ and CQ entries, if any. If either of the destination
|
||||
* rings can't hold what is already there, then fail the operation.
|
||||
*/
|
||||
n.sq_sqes = ptr;
|
||||
tail = o.rings->sq.tail;
|
||||
if (tail - o.rings->sq.head > p.sq_entries)
|
||||
goto overflow;
|
||||
for (i = o.rings->sq.head; i < tail; i++) {
|
||||
unsigned src_head = i & (ctx->sq_entries - 1);
|
||||
unsigned dst_head = i & n.rings->sq_ring_mask;
|
||||
|
||||
n.sq_sqes[dst_head] = o.sq_sqes[src_head];
|
||||
}
|
||||
n.rings->sq.head = o.rings->sq.head;
|
||||
n.rings->sq.tail = o.rings->sq.tail;
|
||||
|
||||
tail = o.rings->cq.tail;
|
||||
if (tail - o.rings->cq.head > p.cq_entries) {
|
||||
overflow:
|
||||
/* restore old rings, and return -EOVERFLOW via cleanup path */
|
||||
ctx->rings = o.rings;
|
||||
ctx->sq_sqes = o.sq_sqes;
|
||||
to_free = &n;
|
||||
ret = -EOVERFLOW;
|
||||
goto out;
|
||||
}
|
||||
for (i = o.rings->cq.head; i < tail; i++) {
|
||||
unsigned src_head = i & (ctx->cq_entries - 1);
|
||||
unsigned dst_head = i & n.rings->cq_ring_mask;
|
||||
|
||||
n.rings->cqes[dst_head] = o.rings->cqes[src_head];
|
||||
}
|
||||
n.rings->cq.head = o.rings->cq.head;
|
||||
n.rings->cq.tail = o.rings->cq.tail;
|
||||
/* invalidate cached cqe refill */
|
||||
ctx->cqe_cached = ctx->cqe_sentinel = NULL;
|
||||
|
||||
n.rings->sq_dropped = o.rings->sq_dropped;
|
||||
n.rings->sq_flags = o.rings->sq_flags;
|
||||
n.rings->cq_flags = o.rings->cq_flags;
|
||||
n.rings->cq_overflow = o.rings->cq_overflow;
|
||||
|
||||
/* all done, store old pointers and assign new ones */
|
||||
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
|
||||
ctx->sq_array = (u32 *)((char *)n.rings + sq_array_offset);
|
||||
|
||||
ctx->sq_entries = p.sq_entries;
|
||||
ctx->cq_entries = p.cq_entries;
|
||||
|
||||
ctx->rings = n.rings;
|
||||
ctx->sq_sqes = n.sq_sqes;
|
||||
swap_old(ctx, o, n, n_ring_pages);
|
||||
swap_old(ctx, o, n, n_sqe_pages);
|
||||
swap_old(ctx, o, n, ring_pages);
|
||||
swap_old(ctx, o, n, sqe_pages);
|
||||
to_free = &o;
|
||||
ret = 0;
|
||||
out:
|
||||
spin_unlock(&ctx->completion_lock);
|
||||
mutex_unlock(&ctx->resize_lock);
|
||||
io_register_free_rings(&p, to_free);
|
||||
|
||||
if (ctx->sq_data)
|
||||
io_sq_thread_unpark(ctx->sq_data);
|
||||
|
||||
return ret;
|
||||
}
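Driving the resize path above from userspace is just another io_uring_register() call carrying a struct io_uring_params, where only the sizing-related setup flags (IORING_SETUP_CQSIZE, IORING_SETUP_CLAMP) may be set, and the kernel copies the updated parameters back on the way out. A hedged sketch using the raw syscall; it assumes a uapi header new enough to define IORING_REGISTER_RESIZE_RINGS:

/* Sketch only: grow an existing ring in place via ring resizing.
 * Assumes a uapi header that defines IORING_REGISTER_RESIZE_RINGS.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int resize_ring(int ring_fd, unsigned sq_entries, unsigned cq_entries)
{
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        p.sq_entries = sq_entries;
        p.flags = IORING_SETUP_CQSIZE;  /* only CQSIZE/CLAMP are accepted */
        p.cq_entries = cq_entries;

        /* nr_args must be 1; updated params are copied back into p */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_RESIZE_RINGS, &p, 1);
}

On success the io_uring_params written back carries the new ring offsets, so the application would presumably re-establish its ring mappings for the new sizes; the kernel side above blocks concurrent mmap via ctx->resize_lock while the swap happens.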
|
||||
|
||||
static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
{
struct io_uring_mem_region_reg __user *reg_uptr = uarg;
struct io_uring_mem_region_reg reg;
struct io_uring_region_desc __user *rd_uptr;
struct io_uring_region_desc rd;
int ret;

if (io_region_is_set(&ctx->param_region))
return -EBUSY;
if (copy_from_user(&reg, reg_uptr, sizeof(reg)))
return -EFAULT;
rd_uptr = u64_to_user_ptr(reg.region_uptr);
if (copy_from_user(&rd, rd_uptr, sizeof(rd)))
return -EFAULT;

if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
return -EINVAL;
if (reg.flags & ~IORING_MEM_REGION_REG_WAIT_ARG)
return -EINVAL;

/*
* This ensures there are no waiters. Waiters are unlocked and it's
* hard to synchronise with them, especially if we need to initialise
* the region.
*/
if ((reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) &&
!(ctx->flags & IORING_SETUP_R_DISABLED))
return -EINVAL;

ret = io_create_region(ctx, &ctx->param_region, &rd);
if (ret)
return ret;
if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
io_free_region(ctx, &ctx->param_region);
return -EFAULT;
}

if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
ctx->cq_wait_arg = io_region_get_ptr(&ctx->param_region);
ctx->cq_wait_size = rd.size;
}
return 0;
}
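From userspace, the registration above takes a struct io_uring_mem_region_reg whose region_uptr points at a struct io_uring_region_desc describing the backing memory. A hedged sketch follows; the desc field names (user_addr, size, flags) and the IORING_MEM_REGION_TYPE_USER flag are assumptions about the uapi layout, while IORING_REGISTER_MEM_REGION and IORING_MEM_REGION_REG_WAIT_ARG come straight from the code above, which also requires the ring to still be IORING_SETUP_R_DISABLED when a wait-argument region is registered.

/* Sketch only: register user memory to back registered wait arguments.
 * The io_uring_region_desc field names and IORING_MEM_REGION_TYPE_USER
 * are assumed; the register opcode and WAIT_ARG flag match the code above.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_wait_region(int ring_fd, void *mem, size_t size)
{
        struct io_uring_region_desc rd;
        struct io_uring_mem_region_reg mr;

        memset(&rd, 0, sizeof(rd));
        rd.user_addr = (uint64_t)(uintptr_t)mem;        /* assumed field name */
        rd.size = size;
        rd.flags = IORING_MEM_REGION_TYPE_USER;         /* assumed flag name */

        memset(&mr, 0, sizeof(mr));
        mr.region_uptr = (uint64_t)(uintptr_t)&rd;
        mr.flags = IORING_MEM_REGION_REG_WAIT_ARG;

        /* only valid while the ring was created IORING_SETUP_R_DISABLED */
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_MEM_REGION, &mr, 1);
}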
|
||||
|
||||
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
|
||||
void __user *arg, unsigned nr_args)
|
||||
__releases(ctx->uring_lock)
|
||||
@ -548,6 +803,18 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
|
||||
break;
|
||||
ret = io_register_clone_buffers(ctx, arg);
|
||||
break;
|
||||
case IORING_REGISTER_RESIZE_RINGS:
|
||||
ret = -EINVAL;
|
||||
if (!arg || nr_args != 1)
|
||||
break;
|
||||
ret = io_register_resize_rings(ctx, arg);
|
||||
break;
|
||||
case IORING_REGISTER_MEM_REGION:
|
||||
ret = -EINVAL;
|
||||
if (!arg || nr_args != 1)
|
||||
break;
|
||||
ret = io_register_mem_region(ctx, arg);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
@ -588,6 +855,32 @@ struct file *io_uring_register_get_file(unsigned int fd, bool registered)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
/*
* "blind" registration opcodes are ones where there's no ring given, and
* hence the source fd must be -1.
*/
static int io_uring_register_blind(unsigned int opcode, void __user *arg,
unsigned int nr_args)
{
switch (opcode) {
case IORING_REGISTER_SEND_MSG_RING: {
struct io_uring_sqe sqe;

if (!arg || nr_args != 1)
return -EINVAL;
if (copy_from_user(&sqe, arg, sizeof(sqe)))
return -EFAULT;
/* no flags supported */
if (sqe.flags)
return -EINVAL;
if (sqe.opcode == IORING_OP_MSG_RING)
return io_uring_sync_msg_ring(&sqe);
}
}

return -EINVAL;
}
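Only IORING_REGISTER_SEND_MSG_RING is accepted on this blind path today: the caller passes fd == -1 and a single io_uring_sqe, with sqe->flags required to be zero. A hedged sketch, assuming the SQE is laid out exactly as for a normal IORING_OP_MSG_RING (target ring fd in sqe->fd, posted user_data in sqe->off, posted res in sqe->len):

/* Sketch only: post a CQE into another ring without owning a ring,
 * via the fd == -1 blind register path above. The off/len mapping to
 * cqe->user_data/cqe->res is assumed from the normal MSG_RING layout.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int send_sync_msg(int target_ring_fd, unsigned long long user_data, int res)
{
        struct io_uring_sqe sqe;

        memset(&sqe, 0, sizeof(sqe));
        sqe.opcode = IORING_OP_MSG_RING;
        sqe.fd = target_ring_fd;        /* ring that receives the CQE */
        sqe.off = user_data;
        sqe.len = res;
        /* sqe.flags stays 0, as required by io_uring_register_blind() */

        return syscall(__NR_io_uring_register, -1,
                       IORING_REGISTER_SEND_MSG_RING, &sqe, 1);
}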
|
||||
|
||||
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
|
||||
void __user *, arg, unsigned int, nr_args)
|
||||
{
|
||||
@ -602,6 +895,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
|
||||
if (opcode >= IORING_REGISTER_LAST)
|
||||
return -EINVAL;
|
||||
|
||||
if (fd == -1)
|
||||
return io_uring_register_blind(opcode, arg, nr_args);
|
||||
|
||||
file = io_uring_register_get_file(fd, use_registered_ring);
|
||||
if (IS_ERR(file))
|
||||
return PTR_ERR(file);
|
||||
@ -610,7 +906,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
ret = __io_uring_register(ctx, opcode, arg, nr_args);
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
|
||||
trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
|
||||
ctx->buf_table.nr, ret);
|
||||
if (!use_registered_ring)
|
||||
fput(file);
|
||||
return ret;
|
||||
|
659  io_uring/rsrc.c
@ -13,7 +13,6 @@
|
||||
#include <uapi/linux/io_uring.h>
|
||||
|
||||
#include "io_uring.h"
|
||||
#include "alloc_cache.h"
|
||||
#include "openclose.h"
|
||||
#include "rsrc.h"
|
||||
#include "memmap.h"
|
||||
@ -26,21 +25,13 @@ struct io_rsrc_update {
|
||||
u32 offset;
|
||||
};
|
||||
|
||||
static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
|
||||
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
||||
struct io_mapped_ubuf **pimu,
|
||||
struct page **last_hpage);
|
||||
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
|
||||
struct iovec *iov, struct page **last_hpage);
|
||||
|
||||
/* only define max */
|
||||
#define IORING_MAX_FIXED_FILES (1U << 20)
|
||||
#define IORING_MAX_REG_BUFFERS (1U << 14)
|
||||
|
||||
static const struct io_mapped_ubuf dummy_ubuf = {
|
||||
/* set invalid range, so io_import_fixed() fails meeting it */
|
||||
.ubuf = -1UL,
|
||||
.len = UINT_MAX,
|
||||
};
|
||||
|
||||
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
|
||||
{
|
||||
unsigned long page_limit, cur_pages, new_pages;
|
||||
@ -110,13 +101,13 @@ static int io_buffer_validate(struct iovec *iov)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
|
||||
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
|
||||
{
|
||||
struct io_mapped_ubuf *imu = *slot;
|
||||
unsigned int i;
|
||||
|
||||
*slot = NULL;
|
||||
if (imu != &dummy_ubuf) {
|
||||
if (node->buf) {
|
||||
struct io_mapped_ubuf *imu = node->buf;
|
||||
|
||||
if (!refcount_dec_and_test(&imu->refs))
|
||||
return;
|
||||
for (i = 0; i < imu->nr_bvecs; i++)
|
||||
@ -127,205 +118,40 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
|
||||
}
|
||||
}
|
||||
|
||||
static void io_rsrc_put_work(struct io_rsrc_node *node)
|
||||
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
|
||||
{
|
||||
struct io_rsrc_put *prsrc = &node->item;
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
if (prsrc->tag)
|
||||
io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);
|
||||
|
||||
switch (node->type) {
|
||||
case IORING_RSRC_FILE:
|
||||
fput(prsrc->file);
|
||||
break;
|
||||
case IORING_RSRC_BUFFER:
|
||||
io_rsrc_buf_put(node->ctx, prsrc);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (node) {
|
||||
node->type = type;
|
||||
node->refs = 1;
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
|
||||
__cold void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data)
|
||||
{
|
||||
if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
|
||||
kfree(node);
|
||||
}
|
||||
|
||||
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
|
||||
__must_hold(&node->ctx->uring_lock)
|
||||
{
|
||||
struct io_ring_ctx *ctx = node->ctx;
|
||||
|
||||
while (!list_empty(&ctx->rsrc_ref_list)) {
|
||||
node = list_first_entry(&ctx->rsrc_ref_list,
|
||||
struct io_rsrc_node, node);
|
||||
/* recycle ref nodes in order */
|
||||
if (node->refs)
|
||||
break;
|
||||
list_del(&node->node);
|
||||
|
||||
if (likely(!node->empty))
|
||||
io_rsrc_put_work(node);
|
||||
io_rsrc_node_destroy(ctx, node);
|
||||
if (!data->nr)
|
||||
return;
|
||||
while (data->nr--) {
|
||||
if (data->nodes[data->nr])
|
||||
io_put_rsrc_node(ctx, data->nodes[data->nr]);
|
||||
}
|
||||
if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
|
||||
wake_up_all(&ctx->rsrc_quiesce_wq);
|
||||
kvfree(data->nodes);
|
||||
data->nodes = NULL;
|
||||
data->nr = 0;
|
||||
}
|
||||
|
||||
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
|
||||
__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
|
||||
{
|
||||
struct io_rsrc_node *ref_node;
|
||||
|
||||
ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
|
||||
if (!ref_node) {
|
||||
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
|
||||
if (!ref_node)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ref_node->ctx = ctx;
|
||||
ref_node->empty = 0;
|
||||
ref_node->refs = 1;
|
||||
return ref_node;
|
||||
}
|
||||
|
||||
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
|
||||
struct io_ring_ctx *ctx)
|
||||
{
|
||||
struct io_rsrc_node *backup;
|
||||
DEFINE_WAIT(we);
|
||||
int ret;
|
||||
|
||||
/* As We may drop ->uring_lock, other task may have started quiesce */
|
||||
if (data->quiesce)
|
||||
return -ENXIO;
|
||||
|
||||
backup = io_rsrc_node_alloc(ctx);
|
||||
if (!backup)
|
||||
return -ENOMEM;
|
||||
ctx->rsrc_node->empty = true;
|
||||
ctx->rsrc_node->type = -1;
|
||||
list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
|
||||
io_put_rsrc_node(ctx, ctx->rsrc_node);
|
||||
ctx->rsrc_node = backup;
|
||||
|
||||
if (list_empty(&ctx->rsrc_ref_list))
|
||||
data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
|
||||
GFP_KERNEL_ACCOUNT | __GFP_ZERO);
|
||||
if (data->nodes) {
|
||||
data->nr = nr;
|
||||
return 0;
|
||||
|
||||
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
|
||||
atomic_set(&ctx->cq_wait_nr, 1);
|
||||
smp_mb();
|
||||
}
|
||||
|
||||
ctx->rsrc_quiesce++;
|
||||
data->quiesce = true;
|
||||
do {
|
||||
prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
|
||||
ret = io_run_task_work_sig(ctx);
|
||||
if (ret < 0) {
|
||||
finish_wait(&ctx->rsrc_quiesce_wq, &we);
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
if (list_empty(&ctx->rsrc_ref_list))
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
schedule();
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
ret = 0;
|
||||
} while (!list_empty(&ctx->rsrc_ref_list));
|
||||
|
||||
finish_wait(&ctx->rsrc_quiesce_wq, &we);
|
||||
data->quiesce = false;
|
||||
ctx->rsrc_quiesce--;
|
||||
|
||||
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
|
||||
atomic_set(&ctx->cq_wait_nr, 0);
|
||||
smp_mb();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void io_free_page_table(void **table, size_t size)
|
||||
{
|
||||
unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
|
||||
|
||||
for (i = 0; i < nr_tables; i++)
|
||||
kfree(table[i]);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
static void io_rsrc_data_free(struct io_rsrc_data *data)
|
||||
{
|
||||
size_t size = data->nr * sizeof(data->tags[0][0]);
|
||||
|
||||
if (data->tags)
|
||||
io_free_page_table((void **)data->tags, size);
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
static __cold void **io_alloc_page_table(size_t size)
|
||||
{
|
||||
unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
|
||||
size_t init_size = size;
|
||||
void **table;
|
||||
|
||||
table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
|
||||
if (!table)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < nr_tables; i++) {
|
||||
unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
|
||||
|
||||
table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
|
||||
if (!table[i]) {
|
||||
io_free_page_table(table, init_size);
|
||||
return NULL;
|
||||
}
|
||||
size -= this_size;
|
||||
}
|
||||
return table;
|
||||
}
|
||||
|
||||
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
|
||||
u64 __user *utags,
|
||||
unsigned nr, struct io_rsrc_data **pdata)
|
||||
{
|
||||
struct io_rsrc_data *data;
|
||||
int ret = 0;
|
||||
unsigned i;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
|
||||
if (!data->tags) {
|
||||
kfree(data);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
data->nr = nr;
|
||||
data->ctx = ctx;
|
||||
data->rsrc_type = type;
|
||||
if (utags) {
|
||||
ret = -EFAULT;
|
||||
for (i = 0; i < nr; i++) {
|
||||
u64 *tag_slot = io_get_tag_slot(data, i);
|
||||
|
||||
if (copy_from_user(tag_slot, &utags[i],
|
||||
sizeof(*tag_slot)))
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
*pdata = data;
|
||||
return 0;
|
||||
fail:
|
||||
io_rsrc_data_free(data);
|
||||
return ret;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
@ -334,14 +160,12 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
{
|
||||
u64 __user *tags = u64_to_user_ptr(up->tags);
|
||||
__s32 __user *fds = u64_to_user_ptr(up->data);
|
||||
struct io_rsrc_data *data = ctx->file_data;
|
||||
struct io_fixed_file *file_slot;
|
||||
int fd, i, err = 0;
|
||||
unsigned int done;
|
||||
|
||||
if (!ctx->file_data)
|
||||
if (!ctx->file_table.data.nr)
|
||||
return -ENXIO;
|
||||
if (up->offset + nr_args > ctx->nr_user_files)
|
||||
if (up->offset + nr_args > ctx->file_table.data.nr)
|
||||
return -EINVAL;
|
||||
|
||||
for (done = 0; done < nr_args; done++) {
|
||||
@ -359,19 +183,13 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
if (fd == IORING_REGISTER_FILES_SKIP)
|
||||
continue;
|
||||
|
||||
i = array_index_nospec(up->offset + done, ctx->nr_user_files);
|
||||
file_slot = io_fixed_file_slot(&ctx->file_table, i);
|
||||
|
||||
if (file_slot->file_ptr) {
|
||||
err = io_queue_rsrc_removal(data, i,
|
||||
io_slot_file(file_slot));
|
||||
if (err)
|
||||
break;
|
||||
file_slot->file_ptr = 0;
|
||||
i = up->offset + done;
|
||||
if (io_reset_rsrc_node(ctx, &ctx->file_table.data, i))
|
||||
io_file_bitmap_clear(&ctx->file_table, i);
|
||||
}
|
||||
|
||||
if (fd != -1) {
|
||||
struct file *file = fget(fd);
|
||||
struct io_rsrc_node *node;
|
||||
|
||||
if (!file) {
|
||||
err = -EBADF;
|
||||
@ -385,8 +203,16 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
|
||||
err = -EBADF;
|
||||
break;
|
||||
}
|
||||
*io_get_tag_slot(data, i) = tag;
|
||||
io_fixed_file_set(file_slot, file);
|
||||
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
|
||||
if (!node) {
|
||||
err = -ENOMEM;
|
||||
fput(file);
|
||||
break;
|
||||
}
|
||||
ctx->file_table.data.nodes[i] = node;
|
||||
if (tag)
|
||||
node->tag = tag;
|
||||
io_fixed_file_set(node, file);
|
||||
io_file_bitmap_set(&ctx->file_table, i);
|
||||
}
|
||||
}
|
||||
@ -405,13 +231,13 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
|
||||
__u32 done;
|
||||
int i, err;
|
||||
|
||||
if (!ctx->buf_data)
|
||||
if (!ctx->buf_table.nr)
|
||||
return -ENXIO;
|
||||
if (up->offset + nr_args > ctx->nr_user_bufs)
|
||||
if (up->offset + nr_args > ctx->buf_table.nr)
|
||||
return -EINVAL;
|
||||
|
||||
for (done = 0; done < nr_args; done++) {
|
||||
struct io_mapped_ubuf *imu;
|
||||
struct io_rsrc_node *node;
|
||||
u64 tag = 0;
|
||||
|
||||
uvec = u64_to_user_ptr(user_data);
|
||||
@ -427,27 +253,21 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
|
||||
err = io_buffer_validate(iov);
|
||||
if (err)
|
||||
break;
|
||||
if (!iov->iov_base && tag) {
|
||||
err = -EINVAL;
|
||||
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
|
||||
if (IS_ERR(node)) {
|
||||
err = PTR_ERR(node);
|
||||
break;
|
||||
}
|
||||
err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
|
||||
if (ctx->user_bufs[i] != &dummy_ubuf) {
|
||||
err = io_queue_rsrc_removal(ctx->buf_data, i,
|
||||
ctx->user_bufs[i]);
|
||||
if (unlikely(err)) {
|
||||
io_buffer_unmap(ctx, &imu);
|
||||
if (tag) {
|
||||
if (!node) {
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
|
||||
node->tag = tag;
|
||||
}
|
||||
|
||||
ctx->user_bufs[i] = imu;
|
||||
*io_get_tag_slot(ctx->buf_data, i) = tag;
|
||||
i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
|
||||
io_reset_rsrc_node(ctx, &ctx->buf_table, i);
|
||||
ctx->buf_table.nodes[i] = node;
|
||||
if (ctx->compat)
|
||||
user_data += sizeof(struct compat_iovec);
|
||||
else
|
||||
@ -563,7 +383,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
|
||||
struct file *file;
|
||||
int ret, fd;
|
||||
|
||||
if (!req->ctx->file_data)
|
||||
if (!req->ctx->file_table.data.nr)
|
||||
return -ENXIO;
|
||||
|
||||
for (done = 0; done < up->nr_args; done++) {
|
||||
@ -622,65 +442,38 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return IOU_OK;
|
||||
}
|
||||
|
||||
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
|
||||
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
|
||||
{
|
||||
struct io_ring_ctx *ctx = data->ctx;
|
||||
struct io_rsrc_node *node = ctx->rsrc_node;
|
||||
u64 *tag_slot = io_get_tag_slot(data, idx);
|
||||
lockdep_assert_held(&ctx->uring_lock);
|
||||
|
||||
ctx->rsrc_node = io_rsrc_node_alloc(ctx);
|
||||
if (unlikely(!ctx->rsrc_node)) {
|
||||
ctx->rsrc_node = node;
|
||||
return -ENOMEM;
|
||||
if (node->tag)
|
||||
io_post_aux_cqe(ctx, node->tag, 0, 0);
|
||||
|
||||
switch (node->type) {
|
||||
case IORING_RSRC_FILE:
|
||||
if (io_slot_file(node))
|
||||
fput(io_slot_file(node));
|
||||
break;
|
||||
case IORING_RSRC_BUFFER:
|
||||
if (node->buf)
|
||||
io_buffer_unmap(ctx, node);
|
||||
break;
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
}
|
||||
|
||||
node->item.rsrc = rsrc;
|
||||
node->type = data->rsrc_type;
|
||||
node->item.tag = *tag_slot;
|
||||
*tag_slot = 0;
|
||||
list_add_tail(&node->node, &ctx->rsrc_ref_list);
|
||||
io_put_rsrc_node(ctx, node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->nr_user_files; i++) {
|
||||
struct file *file = io_file_from_index(&ctx->file_table, i);
|
||||
|
||||
if (!file)
|
||||
continue;
|
||||
io_file_bitmap_clear(&ctx->file_table, i);
|
||||
fput(file);
|
||||
}
|
||||
|
||||
io_free_file_tables(&ctx->file_table);
|
||||
io_file_table_set_alloc_range(ctx, 0, 0);
|
||||
io_rsrc_data_free(ctx->file_data);
|
||||
ctx->file_data = NULL;
|
||||
ctx->nr_user_files = 0;
|
||||
kfree(node);
|
||||
}
|
||||
|
||||
int io_sqe_files_unregister(struct io_ring_ctx *ctx)
|
||||
{
|
||||
unsigned nr = ctx->nr_user_files;
|
||||
int ret;
|
||||
|
||||
if (!ctx->file_data)
|
||||
if (!ctx->file_table.data.nr)
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* Quiesce may unlock ->uring_lock, and while it's not held
|
||||
* prevent new requests using the table.
|
||||
*/
|
||||
ctx->nr_user_files = 0;
|
||||
ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
|
||||
ctx->nr_user_files = nr;
|
||||
if (!ret)
|
||||
__io_sqe_files_unregister(ctx);
|
||||
return ret;
|
||||
io_free_file_tables(ctx, &ctx->file_table);
|
||||
io_file_table_set_alloc_range(ctx, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
@ -691,7 +484,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
int fd, ret;
|
||||
unsigned i;
|
||||
|
||||
if (ctx->file_data)
|
||||
if (ctx->file_table.data.nr)
|
||||
return -EBUSY;
|
||||
if (!nr_args)
|
||||
return -EINVAL;
|
||||
@ -699,28 +492,22 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
return -EMFILE;
|
||||
if (nr_args > rlimit(RLIMIT_NOFILE))
|
||||
return -EMFILE;
|
||||
ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
|
||||
&ctx->file_data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
|
||||
io_rsrc_data_free(ctx->file_data);
|
||||
ctx->file_data = NULL;
|
||||
if (!io_alloc_file_tables(ctx, &ctx->file_table, nr_args))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
|
||||
struct io_fixed_file *file_slot;
|
||||
for (i = 0; i < nr_args; i++) {
|
||||
struct io_rsrc_node *node;
|
||||
u64 tag = 0;
|
||||
|
||||
if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
|
||||
ret = -EFAULT;
|
||||
ret = -EFAULT;
|
||||
if (tags && copy_from_user(&tag, &tags[i], sizeof(tag)))
|
||||
goto fail;
|
||||
if (fds && copy_from_user(&fd, &fds[i], sizeof(fd)))
|
||||
goto fail;
|
||||
}
|
||||
/* allow sparse sets */
|
||||
if (!fds || fd == -1) {
|
||||
ret = -EINVAL;
|
||||
if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
|
||||
if (tag)
|
||||
goto fail;
|
||||
continue;
|
||||
}
|
||||
@ -737,56 +524,33 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
fput(file);
|
||||
goto fail;
|
||||
}
|
||||
file_slot = io_fixed_file_slot(&ctx->file_table, i);
|
||||
io_fixed_file_set(file_slot, file);
|
||||
ret = -ENOMEM;
|
||||
node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
|
||||
if (!node) {
|
||||
fput(file);
|
||||
goto fail;
|
||||
}
|
||||
if (tag)
|
||||
node->tag = tag;
|
||||
ctx->file_table.data.nodes[i] = node;
|
||||
io_fixed_file_set(node, file);
|
||||
io_file_bitmap_set(&ctx->file_table, i);
|
||||
}
|
||||
|
||||
/* default it to the whole table */
|
||||
io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
|
||||
io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
|
||||
return 0;
|
||||
fail:
|
||||
__io_sqe_files_unregister(ctx);
|
||||
io_sqe_files_unregister(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
|
||||
{
|
||||
io_buffer_unmap(ctx, &prsrc->buf);
|
||||
prsrc->buf = NULL;
|
||||
}
|
||||
|
||||
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < ctx->nr_user_bufs; i++)
|
||||
io_buffer_unmap(ctx, &ctx->user_bufs[i]);
|
||||
kfree(ctx->user_bufs);
|
||||
io_rsrc_data_free(ctx->buf_data);
|
||||
ctx->user_bufs = NULL;
|
||||
ctx->buf_data = NULL;
|
||||
ctx->nr_user_bufs = 0;
|
||||
}
|
||||
|
||||
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
|
||||
{
|
||||
unsigned nr = ctx->nr_user_bufs;
|
||||
int ret;
|
||||
|
||||
if (!ctx->buf_data)
|
||||
if (!ctx->buf_table.nr)
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* Quiesce may unlock ->uring_lock, and while it's not held
|
||||
* prevent new requests using the table.
|
||||
*/
|
||||
ctx->nr_user_bufs = 0;
|
||||
ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
|
||||
ctx->nr_user_bufs = nr;
|
||||
if (!ret)
|
||||
__io_sqe_buffers_unregister(ctx);
|
||||
return ret;
|
||||
io_rsrc_data_free(ctx, &ctx->buf_table);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -812,9 +576,13 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
|
||||
}
|
||||
|
||||
/* check previously registered pages */
|
||||
for (i = 0; i < ctx->nr_user_bufs; i++) {
|
||||
struct io_mapped_ubuf *imu = ctx->user_bufs[i];
|
||||
for (i = 0; i < ctx->buf_table.nr; i++) {
|
||||
struct io_rsrc_node *node = ctx->buf_table.nodes[i];
|
||||
struct io_mapped_ubuf *imu;
|
||||
|
||||
if (!node)
|
||||
continue;
|
||||
imu = node->buf;
|
||||
for (j = 0; j < imu->nr_bvecs; j++) {
|
||||
if (!PageCompound(imu->bvec[j].bv_page))
|
||||
continue;
|
||||
@ -950,21 +718,26 @@ static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
|
||||
return io_do_coalesce_buffer(pages, nr_pages, data, nr_folios);
|
||||
}
|
||||
|
||||
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
||||
struct io_mapped_ubuf **pimu,
|
||||
struct page **last_hpage)
|
||||
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
|
||||
struct iovec *iov,
|
||||
struct page **last_hpage)
|
||||
{
|
||||
struct io_mapped_ubuf *imu = NULL;
|
||||
struct page **pages = NULL;
|
||||
struct io_rsrc_node *node;
|
||||
unsigned long off;
|
||||
size_t size;
|
||||
int ret, nr_pages, i;
|
||||
struct io_imu_folio_data data;
|
||||
bool coalesced;
|
||||
|
||||
*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
|
||||
if (!iov->iov_base)
|
||||
return 0;
|
||||
return NULL;
|
||||
|
||||
node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
|
||||
if (!node)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
node->buf = NULL;
|
||||
|
||||
ret = -ENOMEM;
|
||||
pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
|
||||
@ -998,7 +771,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
||||
imu->folio_shift = data.folio_shift;
|
||||
refcount_set(&imu->refs, 1);
|
||||
off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
|
||||
*pimu = imu;
|
||||
node->buf = imu;
|
||||
ret = 0;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
@ -1010,46 +783,42 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
||||
size -= vec_len;
|
||||
}
|
||||
done:
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kvfree(imu);
|
||||
if (node)
|
||||
io_put_rsrc_node(ctx, node);
|
||||
node = ERR_PTR(ret);
|
||||
}
|
||||
kvfree(pages);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
|
||||
{
|
||||
ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
|
||||
return ctx->user_bufs ? 0 : -ENOMEM;
|
||||
return node;
|
||||
}
|
||||
|
||||
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
unsigned int nr_args, u64 __user *tags)
|
||||
{
|
||||
struct page *last_hpage = NULL;
|
||||
struct io_rsrc_data *data;
|
||||
struct io_rsrc_data data;
|
||||
struct iovec fast_iov, *iov = &fast_iov;
|
||||
const struct iovec __user *uvec;
|
||||
int i, ret;
|
||||
|
||||
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
|
||||
|
||||
if (ctx->user_bufs)
|
||||
if (ctx->buf_table.nr)
|
||||
return -EBUSY;
|
||||
if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
|
||||
return -EINVAL;
|
||||
ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
|
||||
ret = io_rsrc_data_alloc(&data, nr_args);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = io_buffers_map_alloc(ctx, nr_args);
|
||||
if (ret) {
|
||||
io_rsrc_data_free(data);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!arg)
|
||||
memset(iov, 0, sizeof(*iov));
|
||||
|
||||
for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
|
||||
for (i = 0; i < nr_args; i++) {
|
||||
struct io_rsrc_node *node;
|
||||
u64 tag = 0;
|
||||
|
||||
if (arg) {
|
||||
uvec = (struct iovec __user *) arg;
|
||||
iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
|
||||
@ -1066,22 +835,31 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
|
||||
arg += sizeof(struct iovec);
|
||||
}
|
||||
|
||||
if (!iov->iov_base && *io_get_tag_slot(data, i)) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
if (tags) {
|
||||
if (copy_from_user(&tag, &tags[i], sizeof(tag))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
|
||||
&last_hpage);
|
||||
if (ret)
|
||||
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
|
||||
if (IS_ERR(node)) {
|
||||
ret = PTR_ERR(node);
|
||||
break;
|
||||
}
|
||||
if (tag) {
|
||||
if (!node) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
node->tag = tag;
|
||||
}
|
||||
data.nodes[i] = node;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(ctx->buf_data);
|
||||
|
||||
ctx->buf_data = data;
|
||||
ctx->buf_table = data;
|
||||
if (ret)
|
||||
__io_sqe_buffers_unregister(ctx);
|
||||
io_sqe_buffers_unregister(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1127,7 +905,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
|
||||
const struct bio_vec *bvec = imu->bvec;
|
||||
|
||||
if (offset < bvec->bv_len) {
|
||||
iter->bvec = bvec;
|
||||
iter->count -= offset;
|
||||
iter->iov_offset = offset;
|
||||
} else {
|
||||
@ -1137,7 +914,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
|
||||
offset -= bvec->bv_len;
|
||||
seg_skip = 1 + (offset >> imu->folio_shift);
|
||||
|
||||
iter->bvec = bvec + seg_skip;
|
||||
iter->bvec += seg_skip;
|
||||
iter->nr_segs -= seg_skip;
|
||||
iter->count -= bvec->bv_len + offset;
|
||||
iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
|
||||
@ -1147,11 +924,43 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
|
||||
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
|
||||
struct io_uring_clone_buffers *arg)
|
||||
{
|
||||
struct io_mapped_ubuf **user_bufs;
|
||||
struct io_rsrc_data *data;
|
||||
int i, ret, nbufs;
|
||||
struct io_rsrc_data data;
|
||||
int i, ret, off, nr;
|
||||
unsigned int nbufs;
|
||||
|
||||
/* if offsets are given, must have nr specified too */
|
||||
if (!arg->nr && (arg->dst_off || arg->src_off))
|
||||
return -EINVAL;
|
||||
/* not allowed unless REPLACE is set */
|
||||
if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
|
||||
return -EBUSY;
|
||||
|
||||
nbufs = READ_ONCE(src_ctx->buf_table.nr);
|
||||
if (!arg->nr)
|
||||
arg->nr = nbufs;
|
||||
else if (arg->nr > nbufs)
|
||||
return -EINVAL;
|
||||
else if (arg->nr > IORING_MAX_REG_BUFFERS)
|
||||
return -EINVAL;
|
||||
if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
|
||||
return -EOVERFLOW;
|
||||
|
||||
ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.nr));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Fill entries in data from dst that won't overlap with src */
|
||||
for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
|
||||
struct io_rsrc_node *src_node = ctx->buf_table.nodes[i];
|
||||
|
||||
if (src_node) {
|
||||
data.nodes[i] = src_node;
|
||||
src_node->refs++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Drop our own lock here. We'll setup the data we need and reference
|
||||
@ -1161,45 +970,77 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
|
||||
|
||||
mutex_lock(&src_ctx->uring_lock);
|
||||
ret = -ENXIO;
|
||||
nbufs = src_ctx->nr_user_bufs;
|
||||
nbufs = src_ctx->buf_table.nr;
|
||||
if (!nbufs)
|
||||
goto out_unlock;
|
||||
ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, NULL, nbufs, &data);
|
||||
if (ret)
|
||||
ret = -EINVAL;
|
||||
if (!arg->nr)
|
||||
arg->nr = nbufs;
|
||||
else if (arg->nr > nbufs)
|
||||
goto out_unlock;
|
||||
ret = -EOVERFLOW;
|
||||
if (check_add_overflow(arg->nr, arg->src_off, &off))
|
||||
goto out_unlock;
|
||||
if (off > nbufs)
|
||||
goto out_unlock;
|
||||
|
||||
ret = -ENOMEM;
|
||||
user_bufs = kcalloc(nbufs, sizeof(*ctx->user_bufs), GFP_KERNEL);
|
||||
if (!user_bufs)
|
||||
goto out_free_data;
|
||||
off = arg->dst_off;
|
||||
i = arg->src_off;
|
||||
nr = arg->nr;
|
||||
while (nr--) {
|
||||
struct io_rsrc_node *dst_node, *src_node;
|
||||
|
||||
for (i = 0; i < nbufs; i++) {
|
||||
struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
|
||||
src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
|
||||
if (!src_node) {
|
||||
dst_node = NULL;
|
||||
} else {
|
||||
dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
|
||||
if (!dst_node) {
|
||||
ret = -ENOMEM;
|
||||
goto out_put_free;
|
||||
}
|
||||
|
||||
if (src != &dummy_ubuf)
|
||||
refcount_inc(&src->refs);
|
||||
user_bufs[i] = src;
|
||||
refcount_inc(&src_node->buf->refs);
|
||||
dst_node->buf = src_node->buf;
|
||||
}
|
||||
data.nodes[off++] = dst_node;
|
||||
i++;
|
||||
}
|
||||
|
||||
/* Have a ref on the bufs now, drop src lock and re-grab our own lock */
|
||||
mutex_unlock(&src_ctx->uring_lock);
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
if (!ctx->user_bufs) {
|
||||
ctx->user_bufs = user_bufs;
|
||||
ctx->buf_data = data;
|
||||
ctx->nr_user_bufs = nbufs;
|
||||
|
||||
/*
|
||||
* If asked for replace, put the old table. data->nodes[] holds both
|
||||
* old and new nodes at this point.
|
||||
*/
|
||||
if (arg->flags & IORING_REGISTER_DST_REPLACE)
|
||||
io_rsrc_data_free(ctx, &ctx->buf_table);
|
||||
|
||||
/*
|
||||
* ctx->buf_table should be empty now - either the contents are being
|
||||
* replaced and we just freed the table, or someone raced setting up
|
||||
* a buffer table while the clone was happening. If not empty, fall
|
||||
* through to failure handling.
|
||||
*/
|
||||
if (!ctx->buf_table.nr) {
|
||||
ctx->buf_table = data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_unlock(&ctx->uring_lock);
|
||||
mutex_lock(&src_ctx->uring_lock);
|
||||
/* someone raced setting up buffers, dump ours */
|
||||
for (i = 0; i < nbufs; i++)
|
||||
io_buffer_unmap(ctx, &user_bufs[i]);
|
||||
io_rsrc_data_free(data);
|
||||
kfree(user_bufs);
|
||||
return -EBUSY;
|
||||
out_free_data:
|
||||
io_rsrc_data_free(data);
|
||||
ret = -EBUSY;
|
||||
out_put_free:
|
||||
i = data.nr;
|
||||
while (i--) {
|
||||
io_buffer_unmap(src_ctx, data.nodes[i]);
|
||||
kfree(data.nodes[i]);
|
||||
}
|
||||
out_unlock:
|
||||
io_rsrc_data_free(ctx, &data);
|
||||
mutex_unlock(&src_ctx->uring_lock);
|
||||
mutex_lock(&ctx->uring_lock);
|
||||
return ret;
|
||||
@ -1219,12 +1060,12 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
|
||||
struct file *file;
|
||||
int ret;
|
||||
|
||||
if (ctx->user_bufs || ctx->nr_user_bufs)
|
||||
return -EBUSY;
|
||||
if (copy_from_user(&buf, arg, sizeof(buf)))
|
||||
return -EFAULT;
|
||||
if (buf.flags & ~IORING_REGISTER_SRC_REGISTERED)
|
||||
if (buf.flags & ~(IORING_REGISTER_SRC_REGISTERED|IORING_REGISTER_DST_REPLACE))
|
||||
return -EINVAL;
|
||||
if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.nr)
|
||||
return -EBUSY;
|
||||
if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
|
||||
return -EINVAL;
|
||||
|
||||
@ -1232,7 +1073,7 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
|
||||
file = io_uring_register_get_file(buf.src_fd, registered_src);
|
||||
if (IS_ERR(file))
|
||||
return PTR_ERR(file);
|
||||
ret = io_clone_buffers(ctx, file->private_data);
|
||||
ret = io_clone_buffers(ctx, file->private_data, &buf);
|
||||
if (!registered_src)
|
||||
fput(file);
|
||||
return ret;
|
||||
|
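With src_off, dst_off and nr now honoured, userspace can clone just a slice of another ring's buffer table, and IORING_REGISTER_DST_REPLACE lets it overwrite slots in an already populated destination. A hedged sketch; the field names follow the code above, while the exact uapi padding and layout are assumed:

/* Sketch only: clone nr buffers starting at src_off from another ring
 * into this ring at dst_off. Struct layout beyond the fields used by
 * the code above (src_fd, flags, src_off, dst_off, nr) is assumed.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int clone_buffer_range(int dst_ring_fd, int src_ring_fd,
                              unsigned src_off, unsigned dst_off, unsigned nr)
{
        struct io_uring_clone_buffers buf;

        memset(&buf, 0, sizeof(buf));
        buf.src_fd = src_ring_fd;
        buf.flags = IORING_REGISTER_DST_REPLACE;   /* allow replacing dst slots */
        buf.src_off = src_off;
        buf.dst_off = dst_off;
        buf.nr = nr;            /* 0 means clone the whole source table */

        return syscall(__NR_io_uring_register, dst_ring_fd,
                       IORING_REGISTER_CLONE_BUFFERS, &buf, 1);
}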
@@ -13,33 +13,17 @@ enum {
	IORING_RSRC_BUFFER	= 1,
};

struct io_rsrc_put {
struct io_rsrc_node {
	unsigned char		type;
	int			refs;

	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		unsigned long file_ptr;
		struct io_mapped_ubuf *buf;
	};
};

struct io_rsrc_data {
	struct io_ring_ctx	*ctx;

	u64			**tags;
	unsigned int		nr;
	u16			rsrc_type;
	bool			quiesce;
};

struct io_rsrc_node {
	struct io_ring_ctx	*ctx;
	int			refs;
	bool			empty;
	u16			type;
	struct list_head	node;
	struct io_rsrc_put	item;
};

struct io_mapped_ubuf {
	u64		ubuf;
	unsigned int	len;
@@ -58,21 +42,19 @@ struct io_imu_folio_data {
	unsigned int	folio_shift;
};

void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);
@@ -84,51 +66,56 @@ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
						       int index)
{
	if (index < data->nr)
		return data->nodes[array_index_nospec(index, data->nr)];
	return NULL;
}

static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (node && !--node->refs)
		io_rsrc_node_ref_zero(node);
		io_free_rsrc_node(ctx, node);
}

static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
				       struct io_rsrc_node *node)
static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
				      struct io_rsrc_data *data, int index)
{
	node->refs++;
	struct io_rsrc_node *node = data->nodes[index];

	if (!node)
		return false;
	io_put_rsrc_node(ctx, node);
	data->nodes[index] = NULL;
	return true;
}

static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
{
	lockdep_assert_held(&ctx->uring_lock);
	req->rsrc_node = ctx->rsrc_node;
	io_charge_rsrc_node(ctx, ctx->rsrc_node);
}

static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		__io_req_set_rsrc_node(req, ctx);
		io_ring_submit_unlock(ctx, issue_flags);
	if (req->file_node) {
		io_put_rsrc_node(req->ctx, req->file_node);
		req->file_node = NULL;
	}
	if (req->flags & REQ_F_BUF_NODE) {
		io_put_rsrc_node(req->ctx, req->buf_node);
		req->buf_node = NULL;
	}
}

static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
					   struct io_rsrc_node *node)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
	node->refs++;
	*dst_node = node;
}

static inline int io_rsrc_init(struct io_ring_ctx *ctx)
static inline void io_req_assign_buf_node(struct io_kiocb *req,
					  struct io_rsrc_node *node)
{
	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_node ? 0 : -ENOMEM;
	io_req_assign_rsrc_node(&req->buf_node, node);
	req->flags |= REQ_F_BUF_NODE;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
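Taken together, the new inline helpers replace the per-ring rsrc node accounting with per-node reference counts. A minimal sketch of the intended lookup/assign/put discipline, mirroring what the rw.c and uring_cmd.c hunks below do (kernel-context sketch only; the function name is invented for illustration and assumes the io_uring-internal headers above):

/* Illustrative sketch using only the helpers declared above;
 * not a verbatim kernel path. */
static int example_pin_fixed_buffer(struct io_kiocb *req, unsigned int buf_index)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;

	node = io_rsrc_node_lookup(&ctx->buf_table, buf_index);
	if (!node)
		return -EFAULT;
	/* bumps node->refs and sets REQ_F_BUF_NODE for later cleanup */
	io_req_assign_buf_node(req, node);
	return 0;
}
/* On completion, io_req_put_rsrc_nodes(req) drops the reference and the
 * node is freed once its last user is gone. */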
io_uring/rw.c
@@ -330,22 +330,21 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	struct io_async_rw *io;
	u16 index;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
	if (!node)
		return -EFAULT;
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);
	io_req_assign_buf_node(req, node);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
	ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}
@@ -435,7 +434,7 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
	if (!same_thread_group(req->tctx->task, current) || !in_task())
		return false;
	return true;
}
@@ -818,6 +817,11 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
@@ -1135,6 +1139,78 @@ void io_rw_fail(struct io_kiocb *req)
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* Using half the running time to do schedule */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
@@ -1151,7 +1227,6 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		int ret;

		/*
@@ -1162,17 +1237,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
							   poll_flags);
		} else {
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		}
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
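The hybrid polling path above only runs on rings created with the new setup flag. A hedged userspace sketch of the setup side, assuming uapi headers that already export IORING_SETUP_HYBRID_IOPOLL (the flag is new in this series; the helper name is invented for illustration):

/* Sketch: create a ring that uses hybrid IOPOLL (sleep a while, then spin). */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_hybrid_iopoll_ring(unsigned int entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	/* hybrid polling refines classic IOPOLL, so both flags are set */
	p.flags = IORING_SETUP_IOPOLL | IORING_SETUP_HYBRID_IOPOLL;
	return (int)syscall(__NR_io_uring_setup, entries, &p);
}

The kernel then uses the shortest observed completion time as its sleep budget: io_hybrid_iopoll_delay() sleeps for half of ctx->hybrid_poll_time before spinning, and io_uring_hybrid_poll() shrinks that budget whenever a request completes faster.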
@@ -21,6 +21,7 @@ struct io_splice {
	u64 len;
	int splice_fd_in;
	unsigned int flags;
	struct io_rsrc_node *rsrc_node;
};

static int __io_splice_prep(struct io_kiocb *req,
@@ -34,6 +35,7 @@ static int __io_splice_prep(struct io_kiocb *req,
	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;
	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
	sp->rsrc_node = NULL;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}
@@ -45,6 +47,36 @@ int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	return __io_splice_prep(req, sqe);
}

void io_splice_cleanup(struct io_kiocb *req)
{
	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);

	io_put_rsrc_node(req->ctx, sp->rsrc_node);
}

static struct file *io_splice_get_file(struct io_kiocb *req,
				       unsigned int issue_flags)
{
	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	struct file *file = NULL;

	if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
		return io_file_get_normal(req, sp->splice_fd_in);

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
	if (node) {
		node->refs++;
		sp->rsrc_node = node;
		file = io_slot_file(node);
		req->flags |= REQ_F_NEED_CLEANUP;
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
@@ -55,10 +87,7 @@ int io_tee(struct io_kiocb *req, unsigned int issue_flags)

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	if (sp->flags & SPLICE_F_FD_IN_FIXED)
		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
	else
		in = io_file_get_normal(req, sp->splice_fd_in);
	in = io_splice_get_file(req, issue_flags);
	if (!in) {
		ret = -EBADF;
		goto done;
@@ -96,10 +125,7 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	if (sp->flags & SPLICE_F_FD_IN_FIXED)
		in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
	else
		in = io_file_get_normal(req, sp->splice_fd_in);
	in = io_splice_get_file(req, issue_flags);
	if (!in) {
		ret = -EBADF;
		goto done;
@@ -3,5 +3,6 @@
int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_tee(struct io_kiocb *req, unsigned int issue_flags);

void io_splice_cleanup(struct io_kiocb *req);
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_splice(struct io_kiocb *req, unsigned int issue_flags);
@@ -40,6 +40,7 @@ void io_sq_thread_unpark(struct io_sq_data *sqd)
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
	wake_up(&sqd->wait);
}

void io_sq_thread_park(struct io_sq_data *sqd)
@@ -207,7 +208,7 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		wait_event(sqd->wait, !atomic_read(&sqd->park_pending));
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
@@ -81,6 +81,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
		return ret;
	}

	tctx->task = task;
	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
@@ -300,16 +300,18 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;
	int ret;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
		if (!io_should_terminate_tw()) {
			struct io_cancel_data cd = {
				.ctx = req->ctx,
				.data = prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, 0);
			ret = io_try_cancel(req->tctx, &cd, 0);
		} else {
			ret = -ECANCELED;
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
@@ -637,13 +639,13 @@ void io_queue_linked_timeout(struct io_kiocb *req)
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
	if (tctx && head->tctx != tctx)
		return false;
	if (cancel_all)
		return true;
@@ -656,7 +658,7 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
@@ -671,7 +673,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		if (io_match_task(req, tctx, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
@@ -24,7 +24,7 @@ static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
__cold void io_flush_timeouts(struct io_ring_ctx *ctx);
struct io_cancel_data;
int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all);
void io_queue_linked_timeout(struct io_kiocb *req);
void io_disarm_next(struct io_kiocb *req);
@@ -47,7 +47,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct task_struct *task, bool cancel_all)
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
@@ -61,7 +61,7 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->task != task)
		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
@@ -213,14 +213,18 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;
		struct io_rsrc_node *node;
		u16 index = READ_ONCE(sqe->buf_index);

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
		node = io_rsrc_node_lookup(&ctx->buf_table, index);
		if (unlikely(!node))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
		/*
		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
		io_req_assign_buf_node(req, node);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

@@ -276,8 +280,13 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
@@ -8,4 +8,4 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct task_struct *task, bool cancel_all);
				   struct io_uring_task *tctx, bool cancel_all);
@@ -184,7 +184,7 @@ int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
	return -ENOENT;
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	struct hlist_node *tmp;
@@ -194,7 +194,7 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (!io_match_task_safe(req, task, cancel_all))
		if (!io_match_task_safe(req, tctx, cancel_all))
			continue;
		hlist_del_init(&req->hash_node);
		__io_waitid_cancel(ctx, req);
@@ -331,7 +331,7 @@ int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->task;
	iwa->wo.child_wait.private = req->tctx->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);
@@ -11,5 +11,5 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags);
bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all);