mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 09:16:33 +00:00
342b2e395d
It's more natural to use ktime/ns instead of keeping around usec, especially since we're comparing it against user provided timers, so convert napi busy poll internal handling to ktime. It's also nicer since the type (ktime_t vs unsigned long) now tells the unit of measure. Keep everything as ktime, which we convert to/from micro seconds for IORING_[UN]REGISTER_NAPI. The net/ busy polling code seems to work with usec, however it's not real usec as shift by 10 is used to get it from nsecs, see busy_loop_current_time(), so it's easy to get truncated nsec back and we get back better precision. Note, we can further improve it later by removing the truncation and maybe convincing net/ to use ktime/ns instead. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/95e7ec8d095069a3ed5d40a4bc6f8b586698bc7e.1722003776.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
105 lines
2.4 KiB
C
105 lines
2.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef IOU_NAPI_H
|
|
#define IOU_NAPI_H
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/io_uring.h>
|
|
#include <net/busy_poll.h>
|
|
|
|
#ifdef CONFIG_NET_RX_BUSY_POLL
|
|
|
|
/* set up per-ring napi busy poll state */
void io_napi_init(struct io_ring_ctx *ctx);
/* release per-ring napi busy poll state */
void io_napi_free(struct io_ring_ctx *ctx);

/* IORING_REGISTER_NAPI / IORING_UNREGISTER_NAPI: exchange settings with user space */
int io_register_napi(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);

/* record @sock's napi id on the ring's busy poll list (see io_napi_add()) */
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);

/*
 * NOTE(review): the commit message describes converting busy poll timeout
 * handling to ktime, yet this prototype still takes a timespec64 -- confirm
 * it matches the definition in napi.c.
 */
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq, struct timespec64 *ts);
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
|
|
|
|
static inline bool io_napi(struct io_ring_ctx *ctx)
|
|
{
|
|
return !list_empty(&ctx->napi_list);
|
|
}
|
|
|
|
/*
 * io_napi_adjust_timeout() - adjust the wait timeout for busy polling
 * @ctx:  pointer to io-uring context structure
 * @iowq: the wait queue entry being prepared
 * @ts:   requested wait timeout
 *
 * No-op unless the ring has napi ids registered; otherwise defers to
 * __io_napi_adjust_timeout().
 */
static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  struct timespec64 *ts)
{
	if (io_napi(ctx))
		__io_napi_adjust_timeout(ctx, iowq, ts);
}
|
|
|
|
/*
 * io_napi_busy_loop() - run the napi busy poll loop if enabled
 * @ctx:  pointer to io-uring context structure
 * @iowq: the wait queue entry being waited on
 *
 * No-op unless the ring has napi ids registered; otherwise defers to
 * __io_napi_busy_loop().
 */
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
				     struct io_wait_queue *iowq)
{
	if (io_napi(ctx))
		__io_napi_busy_loop(ctx, iowq);
}
|
|
|
|
/*
|
|
* io_napi_add() - Add napi id to the busy poll list
|
|
* @req: pointer to io_kiocb request
|
|
*
|
|
* Add the napi id of the socket to the napi busy poll list and hash table.
|
|
*/
|
|
static inline void io_napi_add(struct io_kiocb *req)
|
|
{
|
|
struct io_ring_ctx *ctx = req->ctx;
|
|
struct socket *sock;
|
|
|
|
if (!READ_ONCE(ctx->napi_busy_poll_dt))
|
|
return;
|
|
|
|
sock = sock_from_file(req->file);
|
|
if (sock)
|
|
__io_napi_add(ctx, sock);
|
|
}
|
|
|
|
#else

/*
 * CONFIG_NET_RX_BUSY_POLL=n: stub out the whole napi API so callers can
 * use it unconditionally without ifdefs.
 */

static inline void io_napi_init(struct io_ring_ctx *ctx)
{
}
static inline void io_napi_free(struct io_ring_ctx *ctx)
{
}
/* registration is refused when busy polling is compiled out */
static inline int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
{
	return -EOPNOTSUPP;
}
static inline int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
{
	return -EOPNOTSUPP;
}
/* never any napi ids to poll */
static inline bool io_napi(struct io_ring_ctx *ctx)
{
	return false;
}
static inline void io_napi_add(struct io_kiocb *req)
{
}
static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  struct timespec64 *ts)
{
}
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
				     struct io_wait_queue *iowq)
{
}
static inline int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
|
|
|
|
#endif
|