mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
commit 7519134178
Right now the io_async_msghdr is recycled to avoid the overhead of allocating and freeing it for every request, but the iovec is not included, hence that is allocated and freed for each transfer regardless. This commit enables recycling of the iovec between io_async_msghdr recycles. That avoids an alloc+free pair for each transfer that uses an iovec and, on top of that, extends the cache-hot nature of the msghdr to the iovec as well.

Also enable KASAN for the iovec entries, so that reuse can be detected even while they sit in the cache.

The io_async_msghdr also shrinks from 376 -> 288 bytes, an 88 byte saving (or ~23% smaller), as the fast_iov array is dropped from 8 entries to a single entry. There's no point keeping a big fast iovec around if iovecs aren't being allocated and freed continually.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
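Concretely, the recycle path can keep ->free_iov attached to the cached msghdr instead of freeing it, poisoning it while the entry is parked so KASAN flags any stray use, and unpoisoning it again on reuse. A minimal sketch of that idea follows; it assumes the kernel's kasan_mempool_* helpers, and the example_* function names are made up (the real code lives in io_uring's net code, not in the header shown below):

#include <linux/kasan.h>
#include <linux/uio.h>

/*
 * Sketch only, not the upstream implementation: keep the allocated iovec
 * attached to the msghdr across recycles, poisoned while cached.
 */
static void example_netmsg_recycle(struct io_async_msghdr *hdr)
{
        if (hdr->free_iov)
                kasan_mempool_poison_object(hdr->free_iov);
        /* ...then park hdr->cache in the ctx alloc_cache... */
}

/* On reuse from the cache, unpoison before the iovec is touched again. */
static struct iovec *example_netmsg_iov_reuse(struct io_async_msghdr *hdr)
{
        if (hdr->free_iov)
                kasan_mempool_unpoison_object(hdr->free_iov,
                                hdr->free_iov_nr * sizeof(struct iovec));
        return hdr->free_iov;
}

An entry is only truly freed when the cache itself is torn down, which is what io_netmsg_cache_free(), declared in the header below, handles.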
71 lines · 2.1 KiB · C
// SPDX-License-Identifier: GPL-2.0

#include <linux/net.h>
#include <linux/uio.h>

#include "alloc_cache.h"

struct io_async_msghdr {
#if defined(CONFIG_NET)
        union {
                struct iovec fast_iov;
                struct {
                        struct io_cache_entry cache;
                        /* entry size of ->free_iov, if valid */
                        int free_iov_nr;
                };
        };
        /* points to an allocated iov, if NULL we use fast_iov instead */
        struct iovec *free_iov;
        __kernel_size_t controllen;
        __kernel_size_t payloadlen;
        int namelen;
        struct sockaddr __user *uaddr;
        struct msghdr msg;
        struct sockaddr_storage addr;
#endif
};
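/*
 * Illustrative sketch, not part of this header: per the comment on
 * ->free_iov above, a caller would use the heap-allocated iovec when one
 * has been allocated (and recycled), and fall back to the single inline
 * fast_iov otherwise. The helper name is made up for illustration.
 */
static inline struct iovec *example_msghdr_iov(struct io_async_msghdr *hdr)
{
        return hdr->free_iov ? hdr->free_iov : &hdr->fast_iov;
}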

#if defined(CONFIG_NET)

struct io_async_connect {
        struct sockaddr_storage address;
};

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);

int io_send(struct io_kiocb *req, unsigned int issue_flags);

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);

void io_sendrecv_fail(struct io_kiocb *req);

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);

void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
{
}
#endif
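When the cache is pruned, each parked entry is handed to io_netmsg_cache_free() via its embedded io_cache_entry. A plausible body for it, as a sketch rather than the verbatim upstream code: container_of() walks from the cache linkage back to the owning msghdr, and the recycled iovec is freed along with it.

/* Sketch of the teardown hook; the actual body lives in io_uring's net code */
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
        struct io_async_msghdr *kmsg;

        /* ->cache is embedded in io_async_msghdr, so recover the owner */
        kmsg = container_of(entry, struct io_async_msghdr, cache);
        kfree(kmsg->free_iov);  /* kfree(NULL) is a no-op */
        kfree(kmsg);
}

Because the io_cache_entry shares union space with fast_iov, the cache linkage costs no extra bytes: the inline iovec is unused while the entry sits in the cache anyway, so its storage is reused for the list hook and the saved iovec count.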