linux-stable/io_uring/net.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#if defined(CONFIG_NET)
struct io_shutdown {
struct file *file;
int how;
};
struct io_accept {
struct file *file;
struct sockaddr __user *addr;
int __user *addr_len;
int flags;
u32 file_slot;
unsigned long nofile;
};
struct io_socket {
struct file *file;
int domain;
int type;
int protocol;
int flags;
u32 file_slot;
unsigned long nofile;
};
struct io_connect {
struct file *file;
struct sockaddr __user *addr;
int addr_len;
bool in_progress;
bool seen_econnaborted;
};
struct io_sr_msg {
struct file *file;
union {
struct compat_msghdr __user *umsg_compat;
struct user_msghdr __user *umsg;
void __user *buf;
};
unsigned len;
unsigned done_io;
unsigned msg_flags;
unsigned nr_multishot_loops;
u16 flags;
/* initialised and used only by !msg send variants */
u16 addr_len;
u16 buf_group;
void __user *addr;
void __user *msg_control;
/* used only for send zerocopy */
struct io_kiocb *notif;
};
/*
* Number of times we'll try to do receives if there's more data. If we
* exceed this limit, then add us to the back of the queue and retry from
* there. This helps fairness between flooding clients.
*/
#define MULTISHOT_MAX_RETRY 32
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
sqe->buf_index || sqe->splice_fd_in))
return -EINVAL;
shutdown->how = READ_ONCE(sqe->len);
req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
struct socket *sock;
int ret;
WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
ret = __sys_shutdown_sock(sock, shutdown->how);
io_req_set_res(req, ret, 0);
return IOU_OK;
}
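/*
 * A minimal userspace sketch (liburing), not part of this file, showing
 * the request that lands in io_shutdown() above. "ring" and "fd" are
 * assumed to be an initialised ring and a connected socket.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_shutdown(sqe, fd, SHUT_WR);
 *	io_uring_submit(&ring);
 *	// the CQE res mirrors shutdown(2): 0 on success, -errno on failure
 */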
static bool io_net_retry(struct socket *sock, int flags)
{
if (!(flags & MSG_WAITALL))
return false;
return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr *hdr = req->async_data;
if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
return;
/* Let normal cleanup path reap it if we fail adding to the cache */
if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
}
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_cache_entry *entry;
struct io_async_msghdr *hdr;
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
entry = io_alloc_cache_get(&ctx->netmsg_cache);
if (entry) {
hdr = container_of(entry, struct io_async_msghdr, cache);
hdr->free_iov = NULL;
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = hdr;
return hdr;
}
}
if (!io_alloc_async_data(req)) {
hdr = req->async_data;
hdr->free_iov = NULL;
return hdr;
}
return NULL;
}
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
/* ->prep_async is always called from the submission context */
return io_msg_alloc_async(req, 0);
}
static int io_setup_async_msg(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
unsigned int issue_flags)
{
struct io_async_msghdr *async_msg;
if (req_has_async_data(req))
return -EAGAIN;
async_msg = io_msg_alloc_async(req, issue_flags);
if (!async_msg) {
kfree(kmsg->free_iov);
return -ENOMEM;
}
req->flags |= REQ_F_NEED_CLEANUP;
memcpy(async_msg, kmsg, sizeof(*kmsg));
if (async_msg->msg.msg_name)
async_msg->msg.msg_name = &async_msg->addr;
if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
return -EAGAIN;
/* if we're using fast_iov, set it to the new one */
if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
}
return -EAGAIN;
}
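/*
 * Worked example for the fast_iov rebase above: after a partial transfer,
 * iov_iter_advance() may leave msg_iter.__iov pointing at, say,
 * &kmsg->fast_iov[2], with iov_offset relative to that element. The
 * memcpy() above copies that pointer verbatim, so the iterator must be
 * re-pointed at the same index in async_msg->fast_iov; otherwise the
 * copied iter would mix an offset from one array with the base of
 * another.
 */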
#ifdef CONFIG_COMPAT
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg,
struct compat_msghdr *msg, int ddir)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct compat_iovec __user *uiov;
int ret;
if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
return -EFAULT;
uiov = compat_ptr(msg->msg_iov);
if (req->flags & REQ_F_BUFFER_SELECT) {
compat_ssize_t clen;
iomsg->free_iov = NULL;
if (msg->msg_iovlen == 0) {
sr->len = 0;
} else if (msg->msg_iovlen > 1) {
return -EINVAL;
} else {
if (!access_ok(uiov, sizeof(*uiov)))
return -EFAULT;
if (__get_user(clen, &uiov->iov_len))
return -EFAULT;
if (clen < 0)
return -EINVAL;
sr->len = clen;
}
return 0;
}
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
if (unlikely(ret < 0))
return ret;
return 0;
}
#endif
static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
struct user_msghdr *msg, int ddir)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
return -EFAULT;
ret = -EFAULT;
unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
msg->msg_flags = 0;
if (req->flags & REQ_F_BUFFER_SELECT) {
if (msg->msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
} else if (msg->msg_iovlen > 1) {
ret = -EINVAL;
goto ua_end;
} else {
/* we only need the length for provided buffers */
if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
goto ua_end;
unsafe_get_user(iomsg->fast_iov[0].iov_len,
&msg->msg_iov[0].iov_len, ua_end);
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
}
ret = 0;
ua_end:
user_access_end();
return ret;
}
user_access_end();
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter, false);
if (unlikely(ret < 0))
return ret;
return 0;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct user_msghdr msg;
int ret;
iomsg->msg.msg_name = &iomsg->addr;
iomsg->msg.msg_iter.nr_segs = 0;
#ifdef CONFIG_COMPAT
if (unlikely(req->ctx->compat)) {
struct compat_msghdr cmsg;
ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
if (unlikely(ret))
return ret;
return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
}
#endif
ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
if (unlikely(ret))
return ret;
ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
/* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = iomsg->msg.msg_control_user;
return ret;
}
int io_send_prep_async(struct io_kiocb *req)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
int ret;
if (!zc->addr || req_has_async_data(req))
return 0;
io = io_msg_alloc_async_prep(req);
if (!io)
return -ENOMEM;
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
return ret;
}
static int io_setup_async_addr(struct io_kiocb *req,
struct sockaddr_storage *addr_storage,
unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
if (!sr->addr || req_has_async_data(req))
return -EAGAIN;
io = io_msg_alloc_async(req, issue_flags);
if (!io)
return -ENOMEM;
memcpy(&io->addr, addr_storage, sizeof(io->addr));
return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
ret = io_sendmsg_copy_hdr(req, req->async_data);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0;
if (req->opcode == IORING_OP_SEND) {
if (READ_ONCE(sqe->__pad3[0]))
return -EINVAL;
sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
sr->addr_len = READ_ONCE(sqe->addr_len);
} else if (sqe->addr2 || sqe->file_index) {
return -EINVAL;
}
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
return 0;
}
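/*
 * A minimal userspace sketch (liburing), not part of this file, matching
 * the SQE layout io_sendmsg_prep() accepts for IORING_OP_SENDMSG (the
 * user_msghdr pointer in sqe->addr, flags in sqe->msg_flags). "fd" and
 * "iov" are example values.
 *
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sendmsg(sqe, fd, &mh, 0);
 *	io_uring_submit(&ring);
 *	// io_sendmsg_prep() ORs in MSG_NOSIGNAL; MSG_DONTWAIT is added
 *	// per-issue for nonblocking attempts
 */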
static void io_req_msg_cleanup(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
unsigned int issue_flags)
{
req->flags &= ~REQ_F_NEED_CLEANUP;
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
io_netmsg_recycle(req, issue_flags);
}
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
int min_ret = 0;
int ret;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
if (req_has_async_data(req)) {
kmsg = req->async_data;
kmsg->msg.msg_control_user = sr->msg_control;
} else {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_control = NULL;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
io_req_msg_cleanup(req, kmsg, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
io_req_set_res(req, ret, 0);
return IOU_OK;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
struct sockaddr_storage __address;
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct socket *sock;
unsigned flags;
int min_ret = 0;
int ret;
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
msg.msg_ubuf = NULL;
if (sr->addr) {
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
msg.msg_name = &io->addr;
} else {
ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
if (unlikely(ret < 0))
return ret;
msg.msg_name = (struct sockaddr *)&__address;
}
msg.msg_namelen = sr->addr_len;
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_addr(req, &__address, issue_flags);
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
if (unlikely(ret))
return ret;
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
msg.msg_flags = flags;
ret = sock_sendmsg(sock, &msg);
if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_addr(req, &__address, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
io_req_set_res(req, ret, 0);
return IOU_OK;
}
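/*
 * A minimal userspace sketch (liburing), not part of this file, for the
 * sendto(2)-like variant above where io_sendmsg_prep() reads a
 * destination from sqe->addr2/sqe->addr_len. "fd", "buf", "len" and
 * "dst" are example values.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_send(sqe, fd, buf, len, 0);
 *	io_uring_prep_send_set_addr(sqe, (struct sockaddr *)&dst,
 *				    sizeof(dst));
 *	io_uring_submit(&ring);
 */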
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
struct io_async_msghdr *iomsg,
int namelen, size_t controllen)
{
if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
(REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
int hdr;
if (unlikely(namelen < 0))
return -EOVERFLOW;
if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
namelen, &hdr))
return -EOVERFLOW;
if (check_add_overflow(hdr, controllen, &hdr))
return -EOVERFLOW;
iomsg->namelen = namelen;
iomsg->controllen = controllen;
return 0;
}
return 0;
}
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
struct user_msghdr msg;
int ret;
iomsg->msg.msg_name = &iomsg->addr;
iomsg->msg.msg_iter.nr_segs = 0;
#ifdef CONFIG_COMPAT
if (unlikely(req->ctx->compat)) {
struct compat_msghdr cmsg;
ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
if (unlikely(ret))
return ret;
ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
if (unlikely(ret))
return ret;
return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
cmsg.msg_controllen);
}
#endif
ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
if (unlikely(ret))
return ret;
ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
if (unlikely(ret))
return ret;
return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
msg.msg_controllen);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
struct io_async_msghdr *iomsg;
int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
iomsg = req->async_data;
ret = io_recvmsg_copy_hdr(req, iomsg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
}
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0;
if (unlikely(sqe->file_index || sqe->addr2))
return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~(RECVMSG_FLAGS))
return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (sr->msg_flags & MSG_ERRQUEUE)
req->flags |= REQ_F_CLEAR_POLLIN;
if (sr->flags & IORING_RECV_MULTISHOT) {
if (!(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
if (sr->msg_flags & MSG_WAITALL)
return -EINVAL;
if (req->opcode == IORING_OP_RECV && sr->len)
return -EINVAL;
req->flags |= REQ_F_APOLL_MULTISHOT;
/*
* Store the buffer group for this multishot receive separately,
* as if we end up doing an io-wq based issue that selects a
* buffer, it has to be committed immediately and that will
* clear ->buf_list. This means we lose the link to the buffer
* list, and the eventual buffer put on completion then cannot
* restore it.
*/
sr->buf_group = req->buf_index;
}
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
sr->nr_multishot_loops = 0;
return 0;
}
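/*
 * A minimal userspace sketch (liburing), not part of this file,
 * satisfying the multishot constraints checked above:
 * IORING_RECV_MULTISHOT needs buffer select and no MSG_WAITALL. "fd" and
 * "bgid" are example values, and a buffer ring for "bgid" is assumed to
 * be registered already.
 *
 *	struct msghdr mh = { 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recvmsg_multishot(sqe, fd, &mh, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *	io_uring_submit(&ring);
 */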
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
req->flags &= ~REQ_F_BL_EMPTY;
sr->done_io = 0;
sr->len = 0; /* get from the provided buffer */
req->buf_index = sr->buf_group;
}
/*
* Finishes io_recv and io_recvmsg.
*
* Returns true if it is actually finished, or false if it should run
* again (for multishot).
*/
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
struct msghdr *msg, bool mshot_finished,
unsigned issue_flags)
{
unsigned int cflags;
cflags = io_put_kbuf(req, issue_flags);
if (msg->msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
/*
* Fill CQE for this receive and see if we should keep trying to
* receive from this socket.
*/
if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
*ret, cflags | IORING_CQE_F_MORE)) {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
io_recv_prep_retry(req);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
return false;
/* mshot retries exceeded, force a requeue */
sr->nr_multishot_loops = 0;
mshot_retry_ret = IOU_REQUEUE;
}
if (issue_flags & IO_URING_F_MULTISHOT)
*ret = mshot_retry_ret;
else
*ret = -EAGAIN;
return true;
}
/* Finish the request / stop multishot. */
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
*ret = IOU_STOP_MULTISHOT;
else
*ret = IOU_OK;
return true;
}
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
struct io_sr_msg *sr, void __user **buf,
size_t *len)
{
unsigned long ubuf = (unsigned long) *buf;
unsigned long hdr;
hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
kmsg->controllen;
if (*len < hdr)
return -EFAULT;
if (kmsg->controllen) {
unsigned long control = ubuf + hdr - kmsg->controllen;
kmsg->msg.msg_control_user = (void __user *) control;
kmsg->msg.msg_controllen = kmsg->controllen;
}
sr->buf = *buf; /* stash for later copy */
*buf = (void __user *) (ubuf + hdr);
kmsg->payloadlen = *len = *len - hdr;
return 0;
}
struct io_recvmsg_multishot_hdr {
struct io_uring_recvmsg_out msg;
struct sockaddr_storage addr;
};
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
struct io_async_msghdr *kmsg,
unsigned int flags, bool *finished)
{
int err;
int copy_len;
struct io_recvmsg_multishot_hdr hdr;
if (kmsg->namelen)
kmsg->msg.msg_name = &hdr.addr;
kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
kmsg->msg.msg_namelen = 0;
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
err = sock_recvmsg(sock, &kmsg->msg, flags);
*finished = err <= 0;
if (err < 0)
return err;
hdr.msg = (struct io_uring_recvmsg_out) {
.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
};
hdr.msg.payloadlen = err;
if (err > kmsg->payloadlen)
err = kmsg->payloadlen;
copy_len = sizeof(struct io_uring_recvmsg_out);
if (kmsg->msg.msg_namelen > kmsg->namelen)
copy_len += kmsg->namelen;
else
copy_len += kmsg->msg.msg_namelen;
/*
* "fromlen shall refer to the value before truncation.."
* 1003.1g
*/
hdr.msg.namelen = kmsg->msg.msg_namelen;
/* ensure that there is no gap between hdr and sockaddr_storage */
BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
sizeof(struct io_uring_recvmsg_out));
if (copy_to_user(io->buf, &hdr, copy_len)) {
*finished = true;
return -EFAULT;
}
return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
kmsg->controllen + err;
}
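/*
 * A minimal userspace sketch (liburing), not part of this file, consuming
 * the layout io_recvmsg_multishot() writes above (io_uring_recvmsg_out,
 * then name, control and payload). "buf" is the selected buffer for this
 * CQE and "mh" the msghdr given at prep time.
 *
 *	struct io_uring_recvmsg_out *o;
 *
 *	o = io_uring_recvmsg_validate(buf, cqe->res, &mh);
 *	if (o) {
 *		void *name = io_uring_recvmsg_name(o);
 *		void *payload = io_uring_recvmsg_payload(o, &mh);
 *		unsigned int plen =
 *			io_uring_recvmsg_payload_length(o, cqe->res, &mh);
 *		// o->namelen is the pre-truncation length, per 1003.1g
 *	}
 */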
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
bool mshot_finished = true;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
ret = io_recvmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags;
if (force_nonblock)
flags |= MSG_DONTWAIT;
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
size_t len = sr->len;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
if (ret) {
io_kbuf_recycle(req, issue_flags);
return ret;
}
}
iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
}
kmsg->msg.msg_get_inq = 1;
kmsg->msg.msg_inq = -1;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
&mshot_finished);
} else {
/* disable partial retry for recvmsg with cmsg attached */
if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
kmsg->uaddr, flags);
}
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
ret = io_setup_async_msg(req, kmsg, issue_flags);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
return ret;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
req_set_fail(req);
}
if (ret > 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
else
io_kbuf_recycle(req, issue_flags);
if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
goto retry_multishot;
if (mshot_finished)
io_req_msg_cleanup(req, kmsg, issue_flags);
else if (ret == -EAGAIN)
return io_setup_async_msg(req, kmsg, issue_flags);
return ret;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
size_t len = sr->len;
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_get_inq = 1;
msg.msg_controllen = 0;
msg.msg_iocb = NULL;
msg.msg_ubuf = NULL;
flags = sr->msg_flags;
if (force_nonblock)
flags |= MSG_DONTWAIT;
retry_multishot:
if (io_do_buffer_select(req)) {
void __user *buf;
buf = io_buffer_select(req, &len, issue_flags);
if (!buf)
return -ENOBUFS;
sr->buf = buf;
sr->len = len;
}
ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
if (unlikely(ret))
goto out_free;
msg.msg_inq = -1;
msg.msg_flags = 0;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
ret = sock_recvmsg(sock, &msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
if (issue_flags & IO_URING_F_MULTISHOT) {
io_kbuf_recycle(req, issue_flags);
return IOU_ISSUE_SKIP_COMPLETE;
}
return -EAGAIN;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
req_set_fail(req);
}
if (ret > 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
else
io_kbuf_recycle(req, issue_flags);
if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
goto retry_multishot;
return ret;
}
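/*
 * A minimal userspace sketch (liburing), not part of this file, driving
 * the multishot receive path above from a registered buffer ring. "fd",
 * "bgid" and "bufs" are example values; error handling is omitted.
 *
 *	int err, i;
 *	struct io_uring_buf_ring *br;
 *	struct io_uring_sqe *sqe;
 *
 *	br = io_uring_setup_buf_ring(&ring, 8, bgid, 0, &err);
 *	for (i = 0; i < 8; i++)
 *		io_uring_buf_ring_add(br, bufs[i], 4096, i,
 *				      io_uring_buf_ring_mask(8), i);
 *	io_uring_buf_ring_advance(br, 8);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, fd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = bgid;
 *	io_uring_submit(&ring);
 *	// each CQE with IORING_CQE_F_MORE carries the buffer id in
 *	// cqe->flags >> IORING_CQE_BUFFER_SHIFT
 */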
void io_send_zc_cleanup(struct io_kiocb *req)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
if (req_has_async_data(req)) {
io = req->async_data;
/* might be ->fast_iov if *msg_copy_hdr failed */
if (io->free_iov != io->fast_iov)
kfree(io->free_iov);
}
if (zc->notif) {
io_notif_flush(zc->notif);
zc->notif = NULL;
}
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *notif;
zc->done_io = 0;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL;
notif = zc->notif = io_alloc_notif(ctx);
if (!notif)
return -ENOMEM;
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
req->flags |= REQ_F_NEED_CLEANUP;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
if (zc->flags & ~IO_ZC_FLAGS_VALID)
return -EINVAL;
if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
io_notif_set_extended(notif);
io_notif_to_data(notif)->zc_report = true;
}
}
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
if (unlikely(idx >= ctx->nr_user_bufs))
return -EFAULT;
idx = array_index_nospec(idx, ctx->nr_user_bufs);
req->imu = READ_ONCE(ctx->user_bufs[idx]);
io_req_set_rsrc_node(notif, ctx, 0);
}
if (req->opcode == IORING_OP_SEND_ZC) {
if (READ_ONCE(sqe->__pad3[0]))
return -EINVAL;
zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
zc->addr_len = READ_ONCE(sqe->addr_len);
} else {
if (unlikely(sqe->addr2 || sqe->file_index))
return -EINVAL;
if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
return -EINVAL;
}
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
return 0;
}
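/*
 * A minimal userspace sketch (liburing), not part of this file,
 * exercising the IORING_RECVSEND_FIXED_BUF branch above. "fd", "buf" and
 * "len" are example values; buffer index 0 must refer to a region
 * previously registered with io_uring_register_buffers().
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_send_zc_fixed(sqe, fd, buf, len, 0, 0, 0);
 *	io_uring_submit(&ring);
 */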
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
skb_zcopy_downgrade_managed(skb);
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int frag = shinfo->nr_frags;
int ret = 0;
struct bvec_iter bi;
ssize_t copied = 0;
unsigned long truesize = 0;
if (!frag)
shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
else if (unlikely(!skb_zcopy_managed(skb)))
return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
bi.bi_size = min(from->count, length);
bi.bi_bvec_done = from->iov_offset;
bi.bi_idx = 0;
while (bi.bi_size && frag < MAX_SKB_FRAGS) {
struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
copied += v.bv_len;
truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
v.bv_offset, v.bv_len);
bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
}
if (bi.bi_size)
ret = -EMSGSIZE;
shinfo->nr_frags = frag;
from->bvec += bi.bi_idx;
from->nr_segs -= bi.bi_idx;
from->count -= copied;
from->iov_offset = bi.bi_bvec_done;
skb->data_len += copied;
skb->len += copied;
skb->truesize += truesize;
if (sk && sk->sk_type == SOCK_STREAM) {
sk_wmem_queued_add(sk, truesize);
if (!skb_zcopy_pure(skb))
sk_mem_charge(sk, truesize);
} else {
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
}
return ret;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
struct sockaddr_storage __address;
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct socket *sock;
unsigned msg_flags;
int ret, min_ret = 0;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP;
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
if (zc->addr) {
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
msg.msg_name = &io->addr;
} else {
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
if (unlikely(ret < 0))
return ret;
msg.msg_name = (struct sockaddr *)&__address;
}
msg.msg_namelen = zc->addr_len;
}
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
if (unlikely(ret))
return ret;
msg.sg_from_iter = io_sg_from_iter;
} else {
io_notif_set_extended(zc->notif);
ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
if (unlikely(ret))
return ret;
ret = io_notif_account_mem(zc->notif, zc->len);
if (unlikely(ret))
return ret;
msg.sg_from_iter = io_sg_from_iter_iovec;
}
msg_flags = zc->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
msg_flags |= MSG_DONTWAIT;
if (msg_flags & MSG_WAITALL)
min_ret = iov_iter_count(&msg.msg_iter);
msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
msg.msg_flags = msg_flags;
msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
ret = sock_sendmsg(sock, &msg);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_addr(req, &__address, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
if (ret >= 0)
ret += zc->done_io;
else if (zc->done_io)
ret = zc->done_io;
/*
* If we're in io-wq we can't rely on tw ordering guarantees, defer
* flushing notif to io_send_zc_cleanup()
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
return IOU_OK;
}
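/*
 * Completion model for the zerocopy send above, sketched from userspace
 * (liburing), not part of this file: the result CQE carries
 * IORING_CQE_F_MORE, and a second CQE with IORING_CQE_F_NOTIF follows
 * once the kernel has released the buffer.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_NOTIF) {
 *		// notification: the buffer may now be reused or freed
 *	} else if (cqe->flags & IORING_CQE_F_MORE) {
 *		// send result in cqe->res; keep the buffer alive for now
 *	}
 *	io_uring_cqe_seen(&ring, cqe);
 */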
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
io_notif_set_extended(sr->notif);
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP;
if (req_has_async_data(req)) {
kmsg = req->async_data;
} else {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
flags = sr->msg_flags | MSG_ZEROCOPY;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return io_setup_async_msg(req, kmsg, issue_flags);
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
req->flags |= REQ_F_BL_NO_RECYCLE;
return io_setup_async_msg(req, kmsg, issue_flags);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov) {
kfree(kmsg->free_iov);
kmsg->free_iov = NULL;
}
io_netmsg_recycle(req, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
/*
* If we're in io-wq we can't rely on tw ordering guarantees, defer
* flushing notif to io_send_zc_cleanup()
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
req->flags &= ~REQ_F_NEED_CLEANUP;
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
return IOU_OK;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
if (sr->done_io)
req->cqe.res = sr->done_io;
if ((req->flags & REQ_F_NEED_CLEANUP) &&
(req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
unsigned flags;
if (sqe->len || sqe->buf_index)
return -EINVAL;
accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
accept->flags = READ_ONCE(sqe->accept_flags);
accept->nofile = rlimit(RLIMIT_NOFILE);
flags = READ_ONCE(sqe->ioprio);
if (flags & ~IORING_ACCEPT_MULTISHOT)
return -EINVAL;
accept->file_slot = READ_ONCE(sqe->file_index);
if (accept->file_slot) {
if (accept->flags & SOCK_CLOEXEC)
return -EINVAL;
if (flags & IORING_ACCEPT_MULTISHOT &&
accept->file_slot != IORING_FILE_INDEX_ALLOC)
return -EINVAL;
}
if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
if (flags & IORING_ACCEPT_MULTISHOT)
req->flags |= REQ_F_APOLL_MULTISHOT;
return 0;
}
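/*
 * A minimal userspace sketch (liburing), not part of this file, arming
 * the multishot accept validated above. "listen_fd" is an example value.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
 *	io_uring_submit(&ring);
 *	// each CQE with IORING_CQE_F_MORE set carries a new fd in res; a
 *	// CQE without F_MORE means the multishot has terminated
 */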
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
bool fixed = !!accept->file_slot;
struct file *file;
int ret, fd;
retry:
if (!fixed) {
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
if (unlikely(fd < 0))
return fd;
}
file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
accept->flags);
if (IS_ERR(file)) {
if (!fixed)
put_unused_fd(fd);
ret = PTR_ERR(file);
if (ret == -EAGAIN && force_nonblock) {
/*
* if it's multishot and polled, we don't need to
* return EAGAIN to arm the poll infra since it
* has already been done
*/
if (issue_flags & IO_URING_F_MULTISHOT)
return IOU_ISSUE_SKIP_COMPLETE;
return ret;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
} else if (!fixed) {
fd_install(fd, file);
ret = fd;
} else {
ret = io_fixed_fd_install(req, issue_flags, file,
accept->file_slot);
}
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
io_req_set_res(req, ret, 0);
return IOU_OK;
}
if (ret < 0)
return ret;
if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
ret, IORING_CQE_F_MORE))
goto retry;
io_req_set_res(req, ret, 0);
return IOU_STOP_MULTISHOT;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
if (sqe->addr || sqe->rw_flags || sqe->buf_index)
return -EINVAL;
sock->domain = READ_ONCE(sqe->fd);
sock->type = READ_ONCE(sqe->off);
sock->protocol = READ_ONCE(sqe->len);
sock->file_slot = READ_ONCE(sqe->file_index);
sock->nofile = rlimit(RLIMIT_NOFILE);
sock->flags = sock->type & ~SOCK_TYPE_MASK;
if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
return -EINVAL;
if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
return -EINVAL;
return 0;
}
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
bool fixed = !!sock->file_slot;
struct file *file;
int ret, fd;
if (!fixed) {
fd = __get_unused_fd_flags(sock->flags, sock->nofile);
if (unlikely(fd < 0))
return fd;
}
file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
if (IS_ERR(file)) {
if (!fixed)
put_unused_fd(fd);
ret = PTR_ERR(file);
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN;
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
} else if (!fixed) {
fd_install(fd, file);
ret = fd;
} else {
ret = io_fixed_fd_install(req, issue_flags, file,
sock->file_slot);
}
io_req_set_res(req, ret, 0);
return IOU_OK;
}
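/*
 * A minimal userspace sketch (liburing), not part of this file, for the
 * socket(2) analogue above; domain/type/protocol travel in
 * sqe->fd/off/len as read by io_socket_prep().
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_socket(sqe, AF_INET, SOCK_STREAM, 0, 0);
 *	io_uring_submit(&ring);
 *	// CQE res is the new file descriptor, or -errno
 */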
int io_connect_prep_async(struct io_kiocb *req)
{
struct io_async_connect *io = req->async_data;
struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
return -EINVAL;
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
conn->in_progress = conn->seen_econnaborted = false;
return 0;
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
struct io_async_connect __io, *io;
unsigned file_flags;
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (req_has_async_data(req)) {
io = req->async_data;
} else {
ret = move_addr_to_kernel(connect->addr,
connect->addr_len,
&__io.address);
if (ret)
goto out;
io = &__io;
}
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_connect_file(req->file, &io->address,
connect->addr_len, file_flags);
if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
&& force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
} else if (ret == -ECONNABORTED) {
if (connect->seen_econnaborted)
goto out;
connect->seen_econnaborted = true;
}
if (req_has_async_data(req))
return -EAGAIN;
if (io_alloc_async_data(req)) {
ret = -ENOMEM;
goto out;
}
memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
if (connect->in_progress) {
/*
* At least bluetooth will return -EBADFD on a re-connect
* attempt, and it's (supposedly) also valid to get -EISCONN
* which means the previous result is good. For both of these,
* grab the sock_error() and use that for the completion.
*/
if (ret == -EBADFD || ret == -EISCONN)
ret = sock_error(sock_from_file(req->file)->sk);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
out:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
return IOU_OK;
}
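/*
 * A minimal userspace sketch (liburing), not part of this file: from the
 * submitter's point of view the EINPROGRESS/EISCONN handling above is
 * internal, so one request completes with 0 or -errno. "fd" and "dst"
 * are example values.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, fd, (struct sockaddr *)&dst,
 *			      sizeof(dst));
 *	io_uring_submit(&ring);
 */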
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif