linux-stable/io_uring/notif.c
Pavel Begunkov 42385b02ba io_uring/net: move mm accounting to a slower path
We can also move mm accounting to the extended callbacks. It removes a
few cycles from the hot path, including skipping one function call and
setting io_req_task_complete as the callback directly. For user-backed
I/O it shouldn't make any difference, considering atomic mm accounting
and page pinning.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1062f270273ad11c1b7b45ec59a6a317533d5e64.1667557923.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2022-11-21 07:38:31 -07:00
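
For context, a minimal caller-side sketch (hypothetical, not part of this file; `ctx` and `nr_pages` stand in for whatever state the send path has at hand) of how a zero-copy send flavour would opt into the slower, extended path this commit adds:

	/*
	 * Hypothetical caller sketch, not from this file: allocate a
	 * notification on the hot path, then opt into the extended
	 * callbacks only if mm accounting or ZC reporting is needed.
	 */
	struct io_kiocb *notif = io_alloc_notif(ctx);
	struct io_notif_data *nd;

	if (!notif)
		return -ENOMEM;

	io_notif_set_extended(notif);	/* switch to the _ext callbacks */
	nd = io_notif_to_data(notif);
	nd->zc_report = true;		/* ask for ZC-copied reporting */
	nd->account_pages += nr_pages;	/* undone in io_notif_complete_tw_ext() */

Plain notifications skip io_notif_set_extended() entirely and keep the leaner callbacks set up in io_alloc_notif().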


#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "notif.h"
#include "rsrc.h"
static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
{
	struct io_notif_data *nd = io_notif_to_data(notif);
	struct io_ring_ctx *ctx = notif->ctx;

	if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
		notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;

	if (nd->account_pages && ctx->user) {
		__io_unaccount_mem(ctx->user, nd->account_pages);
		nd->account_pages = 0;
	}
	io_req_task_complete(notif, locked);
}
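
/*
 * Default (hot path) ubuf_info completion: drop a notification reference
 * and, on the final put, queue task_work to run io_task_work.func.
 */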
static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
				bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);

	if (refcount_dec_and_test(&uarg->refcnt))
		io_req_task_work_add(notif);
}
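
/*
 * Extended ubuf_info completion: additionally record whether the network
 * stack used the data zero-copy (zc_used) or fell back to copying
 * (zc_copied), then drop the reference via the base callback.
 */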
static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
				    bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);

	if (nd->zc_report) {
		if (success && !nd->zc_used && skb)
			WRITE_ONCE(nd->zc_used, true);
		else if (!success && !nd->zc_copied)
			WRITE_ONCE(nd->zc_copied, true);
	}
	io_tx_ubuf_callback(skb, uarg, success);
}
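
/*
 * Opt a notification into the extended callbacks. Only flavours that need
 * mm accounting or zero-copy usage reporting pay the extra cost; everyone
 * else keeps the leaner callbacks installed at allocation time.
 */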
void io_notif_set_extended(struct io_kiocb *notif)
{
	struct io_notif_data *nd = io_notif_to_data(notif);

	if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
		nd->account_pages = 0;
		nd->zc_report = false;
		nd->zc_used = false;
		nd->zc_copied = false;
		nd->uarg.callback = io_tx_ubuf_callback_ext;
		notif->io_task_work.func = io_notif_complete_tw_ext;
	}
}
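
/*
 * Allocate a notification request. It starts on the cheap completion path
 * (io_req_task_complete) with a single ubuf_info reference; callers that
 * need accounting or ZC reporting must call io_notif_set_extended().
 */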
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	notif = io_alloc_req(ctx);
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->task = current;
	io_get_task_refs(1);
	notif->rsrc_node = NULL;
	notif->io_task_work.func = io_req_task_complete;

	nd = io_notif_to_data(notif);
	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	nd->uarg.callback = io_tx_ubuf_callback;
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}