Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-09 22:50:41 +00:00)
Merge branch 'virtio-net-support-af_xdp-zero-copy-tx'
Xuan Zhuo says:

====================
virtio-net: support AF_XDP zero copy (tx)

XDP socket (AF_XDP) is an excellent kernel-bypass networking framework. The zero-copy feature of xsk (XDP socket) needs to be supported by the driver, and its performance is very good; mlx5 and Intel ixgbe already support it. This patch set allows virtio-net to support xsk's zero-copy xmit feature.

At present, we have completed the necessary preparation:

1. vq-reset (virtio spec and kernel code)
2. virtio-core premapped DMA
3. virtio-net XDP refactor

So it is time for virtio-net to complete the support for the XDP socket zero copy.

Virtio-net cannot increase the queue number at will, so xsk shares a queue with the kernel. This patch set includes some refactoring of virtio-net to support AF_XDP.

The current configuration sets the virtqueue (vq) to premapped mode, implying that all buffers submitted to this queue must be mapped ahead of time. This presents a challenge for the virtnet send queue (sq): the virtnet driver would be required to keep track of DMA information for vq size * 17 entries, which can be substantial. However, if premapped mode were applied on a per-buffer basis, the complexity would be greatly reduced: with AF_XDP enabled, AF_XDP buffers would be premapped, while kernel skb buffers could remain unmapped. We can distinguish them by sg_page(sg): when sg_page(sg) is NULL, the driver has performed the DMA mapping in advance, so the virtio core can use sg_dma_address(sg) directly without doing any internal DMA mapping, and DMA unmap operations for that buffer are bypassed as well.

ENV: QEMU with vhost-user (polling mode).
Host CPU: Intel(R) Xeon(R) Platinum 8163 CPU @ 2.50GHz

testpmd> show port stats all
  ######################## NIC statistics for port 0 ########################
  RX-packets: 19531092064  RX-missed: 0  RX-bytes: 1093741155584
  RX-errors:  0            RX-nombuf: 0
  TX-packets: 5959955552   TX-errors: 0  TX-bytes: 371030645664

  Throughput (since last show)
  Rx-pps: 8861574  Rx-bps: 3969985208
  Tx-pps: 8861493  Tx-bps: 3969962736
  ############################################################################

testpmd> show port stats all
  ######################## NIC statistics for port 0 ########################
  RX-packets: 68152727  RX-missed: 0      RX-bytes: 3816552712
  RX-errors:  0         RX-nombuf: 0
  TX-packets: 68114967  TX-errors: 33216  TX-bytes: 3814438152

  Throughput (since last show)
  Rx-pps: 6333196  Rx-bps: 2837272088
  Tx-pps: 6333227  Tx-bps: 2837285936
  ############################################################################

But AF_XDP consumes more CPU for the tx and rx NAPI (100% and 86%).
====================

Link: https://patch.msgid.link/20241112012928.102478-1-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
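The cover letter above leans on two small mechanisms from the series: per-buffer premapped DMA (signalled by sg_page(sg) == NULL) and tagging the low two bits of the token handed to the send virtqueue, so that completion knows whether it is freeing an skb, an XDP frame, or an AF_XDP buffer. Below is a minimal, standalone userspace sketch of that low-bit tagging scheme. It is illustrative only, not the kernel code: the names mirror the patch, but the enum values, the dummy_skb placeholder, and the main() demo are assumptions made for the example.

/*
 * Standalone sketch (userspace C, not kernel code) of the pointer-tagging
 * scheme: the two low bits of the token stored in the virtqueue encode how
 * the buffer must be handled on completion.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum xmit_type {                 /* must fit in the two low bits */
	XMIT_TYPE_SKB        = 0,
	XMIT_TYPE_SKB_ORPHAN = 1,
	XMIT_TYPE_XDP        = 2,
	XMIT_TYPE_XSK        = 3,
};

#define XMIT_TYPE_MASK  0x3UL    /* BIT(0) | BIT(1) */
#define XSK_LEN_SHIFT   2        /* for XSK, the upper bits carry the length */

/* Pack: kmalloc/skb allocations are at least 4-byte aligned, so the two
 * low bits of the pointer are free to carry the type. */
static void *xmit_ptr_pack(void *ptr, enum xmit_type type)
{
	assert(((uintptr_t)ptr & XMIT_TYPE_MASK) == 0);
	return (void *)((uintptr_t)ptr | type);
}

/* Unpack: strip the tag from *ptr in place and return the type. */
static enum xmit_type xmit_ptr_unpack(void **ptr)
{
	uintptr_t p = (uintptr_t)*ptr;

	*ptr = (void *)(p & ~XMIT_TYPE_MASK);
	return p & XMIT_TYPE_MASK;
}

int main(void)
{
	long dummy_skb;              /* stands in for a struct sk_buff */
	void *token = xmit_ptr_pack(&dummy_skb, XMIT_TYPE_SKB_ORPHAN);
	enum xmit_type t = xmit_ptr_unpack(&token);

	printf("type=%d ptr_restored=%d\n", t, token == (void *)&dummy_skb);

	/* XSK buffers store no pointer at all: the token is just the
	 * descriptor length shifted above the tag bits. */
	uint32_t len = 1500;
	void *xsk_token = xmit_ptr_pack((void *)((uintptr_t)len << XSK_LEN_SHIFT),
					XMIT_TYPE_XSK);
	printf("xsk len=%lu\n",
	       (unsigned long)((uintptr_t)xsk_token >> XSK_LEN_SHIFT));
	return 0;
}

The trick only works because the tagged pointers are at least 4-byte aligned; in the patch the XSK case likewise stores the descriptor length instead of a pointer, which is what lets completion account for bytes without keeping extra per-buffer state.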
This commit is contained in commit 38f83a57aa.
@ -45,9 +45,6 @@ module_param(napi_tx, bool, 0644);
|
||||
#define VIRTIO_XDP_TX BIT(0)
|
||||
#define VIRTIO_XDP_REDIR BIT(1)
|
||||
|
||||
#define VIRTIO_XDP_FLAG BIT(0)
|
||||
#define VIRTIO_ORPHAN_FLAG BIT(1)
|
||||
|
||||
/* RX packet size EWMA. The average packet size is used to determine the packet
|
||||
* buffer size when refilling RX rings. As the entire RX ring may be refilled
|
||||
* at once, the weight is chosen so that the EWMA will be insensitive to short-
|
||||
@ -86,6 +83,7 @@ struct virtnet_sq_free_stats {
|
||||
u64 bytes;
|
||||
u64 napi_packets;
|
||||
u64 napi_bytes;
|
||||
u64 xsk;
|
||||
};
|
||||
|
||||
struct virtnet_sq_stats {
|
||||
@ -298,6 +296,10 @@ struct send_queue {
|
||||
|
||||
/* Record whether sq is in reset state. */
|
||||
bool reset;
|
||||
|
||||
struct xsk_buff_pool *xsk_pool;
|
||||
|
||||
dma_addr_t xsk_hdr_dma_addr;
|
||||
};
|
||||
|
||||
/* Internal representation of a receive virtqueue */
|
||||
@ -498,6 +500,8 @@ struct virtio_net_common_hdr {
|
||||
};
|
||||
};
|
||||
|
||||
static struct virtio_net_common_hdr xsk_hdr;
|
||||
|
||||
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
|
||||
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
|
||||
struct net_device *dev,
|
||||
@ -509,6 +513,14 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
|
||||
struct sk_buff *curr_skb,
|
||||
struct page *page, void *buf,
|
||||
int len, int truesize);
|
||||
static void virtnet_xsk_completed(struct send_queue *sq, int num);
|
||||
|
||||
enum virtnet_xmit_type {
|
||||
VIRTNET_XMIT_TYPE_SKB,
|
||||
VIRTNET_XMIT_TYPE_SKB_ORPHAN,
|
||||
VIRTNET_XMIT_TYPE_XDP,
|
||||
VIRTNET_XMIT_TYPE_XSK,
|
||||
};
|
||||
|
||||
static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
|
||||
{
|
||||
@ -529,67 +541,99 @@ static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
|
||||
kfree(rss->indirection_table);
|
||||
}
|
||||
|
||||
static bool is_xdp_frame(void *ptr)
|
||||
/* We use the last two bits of the pointer to distinguish the xmit type. */
|
||||
#define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
|
||||
|
||||
#define VIRTIO_XSK_FLAG_OFFSET 2
|
||||
|
||||
static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
|
||||
{
|
||||
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
|
||||
unsigned long p = (unsigned long)*ptr;
|
||||
|
||||
*ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
|
||||
|
||||
return p & VIRTNET_XMIT_TYPE_MASK;
|
||||
}
|
||||
|
||||
static void *xdp_to_ptr(struct xdp_frame *ptr)
|
||||
static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
|
||||
{
|
||||
return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
|
||||
return (void *)((unsigned long)ptr | type);
|
||||
}
|
||||
|
||||
static struct xdp_frame *ptr_to_xdp(void *ptr)
|
||||
static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
|
||||
enum virtnet_xmit_type type)
|
||||
{
|
||||
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
|
||||
return virtqueue_add_outbuf(sq->vq, sq->sg, num,
|
||||
virtnet_xmit_ptr_pack(data, type),
|
||||
GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static bool is_orphan_skb(void *ptr)
|
||||
static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
|
||||
{
|
||||
return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG;
|
||||
return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
|
||||
}
|
||||
|
||||
static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
|
||||
static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
|
||||
{
|
||||
return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0));
|
||||
}
|
||||
|
||||
static struct sk_buff *ptr_to_skb(void *ptr)
|
||||
{
|
||||
return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
|
||||
sg_dma_address(sg) = addr;
|
||||
sg_dma_len(sg) = len;
|
||||
}
|
||||
|
||||
static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
|
||||
bool in_napi, struct virtnet_sq_free_stats *stats)
|
||||
{
|
||||
struct xdp_frame *frame;
|
||||
struct sk_buff *skb;
|
||||
unsigned int len;
|
||||
void *ptr;
|
||||
|
||||
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
|
||||
if (!is_xdp_frame(ptr)) {
|
||||
struct sk_buff *skb = ptr_to_skb(ptr);
|
||||
switch (virtnet_xmit_ptr_unpack(&ptr)) {
|
||||
case VIRTNET_XMIT_TYPE_SKB:
|
||||
skb = ptr;
|
||||
|
||||
pr_debug("Sent skb %p\n", skb);
|
||||
|
||||
if (is_orphan_skb(ptr)) {
|
||||
stats->packets++;
|
||||
stats->bytes += skb->len;
|
||||
} else {
|
||||
stats->napi_packets++;
|
||||
stats->napi_bytes += skb->len;
|
||||
}
|
||||
stats->napi_packets++;
|
||||
stats->napi_bytes += skb->len;
|
||||
napi_consume_skb(skb, in_napi);
|
||||
} else {
|
||||
struct xdp_frame *frame = ptr_to_xdp(ptr);
|
||||
break;
|
||||
|
||||
case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
|
||||
skb = ptr;
|
||||
|
||||
stats->packets++;
|
||||
stats->bytes += skb->len;
|
||||
napi_consume_skb(skb, in_napi);
|
||||
break;
|
||||
|
||||
case VIRTNET_XMIT_TYPE_XDP:
|
||||
frame = ptr;
|
||||
|
||||
stats->packets++;
|
||||
stats->bytes += xdp_get_frame_len(frame);
|
||||
xdp_return_frame(frame);
|
||||
break;
|
||||
|
||||
case VIRTNET_XMIT_TYPE_XSK:
|
||||
stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
|
||||
stats->xsk++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
|
||||
}
|
||||
|
||||
static void virtnet_free_old_xmit(struct send_queue *sq,
|
||||
struct netdev_queue *txq,
|
||||
bool in_napi,
|
||||
struct virtnet_sq_free_stats *stats)
|
||||
{
|
||||
__free_old_xmit(sq, txq, in_napi, stats);
|
||||
|
||||
if (stats->xsk)
|
||||
virtnet_xsk_completed(sq, stats->xsk);
|
||||
}
|
||||
|
||||
/* Converting between virtqueue no. and kernel tx/rx queue no.
|
||||
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
|
||||
*/
|
||||
@ -936,8 +980,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
|
||||
addr = dma->addr - sizeof(*dma) + offset;
|
||||
|
||||
sg_init_table(rq->sg, 1);
|
||||
rq->sg[0].dma_address = addr;
|
||||
rq->sg[0].length = len;
|
||||
sg_fill_dma(rq->sg, addr, len);
|
||||
}
|
||||
|
||||
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
|
||||
@ -1020,7 +1063,7 @@ static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
|
||||
{
|
||||
struct virtnet_sq_free_stats stats = {0};
|
||||
|
||||
__free_old_xmit(sq, txq, in_napi, &stats);
|
||||
virtnet_free_old_xmit(sq, txq, in_napi, &stats);
|
||||
|
||||
/* Avoid overhead when no packets have been processed
|
||||
* happens when called speculatively from start_xmit.
|
||||
@ -1087,12 +1130,6 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
|
||||
}
|
||||
}
|
||||
|
||||
static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
|
||||
{
|
||||
sg->dma_address = addr;
|
||||
sg->length = len;
|
||||
}
|
||||
|
||||
static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
|
||||
struct receive_queue *rq, void *buf, u32 len)
|
||||
{
|
||||
@ -1373,7 +1410,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
|
||||
sg_init_table(rq->sg, 1);
|
||||
sg_fill_dma(rq->sg, addr, len);
|
||||
|
||||
err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
|
||||
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
|
||||
xsk_buffs[i], NULL, gfp);
|
||||
if (err)
|
||||
goto err;
|
||||
}
|
||||
@ -1387,6 +1425,120 @@ err:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void *virtnet_xsk_to_ptr(u32 len)
|
||||
{
|
||||
unsigned long p;
|
||||
|
||||
p = len << VIRTIO_XSK_FLAG_OFFSET;
|
||||
|
||||
return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
|
||||
}
|
||||
|
||||
static int virtnet_xsk_xmit_one(struct send_queue *sq,
|
||||
struct xsk_buff_pool *pool,
|
||||
struct xdp_desc *desc)
|
||||
{
|
||||
struct virtnet_info *vi;
|
||||
dma_addr_t addr;
|
||||
|
||||
vi = sq->vq->vdev->priv;
|
||||
|
||||
addr = xsk_buff_raw_get_dma(pool, desc->addr);
|
||||
xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
|
||||
|
||||
sg_init_table(sq->sg, 2);
|
||||
sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
|
||||
sg_fill_dma(sq->sg + 1, addr, desc->len);
|
||||
|
||||
return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
|
||||
virtnet_xsk_to_ptr(desc->len),
|
||||
GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static int virtnet_xsk_xmit_batch(struct send_queue *sq,
|
||||
struct xsk_buff_pool *pool,
|
||||
unsigned int budget,
|
||||
u64 *kicks)
|
||||
{
|
||||
struct xdp_desc *descs = pool->tx_descs;
|
||||
bool kick = false;
|
||||
u32 nb_pkts, i;
|
||||
int err;
|
||||
|
||||
budget = min_t(u32, budget, sq->vq->num_free);
|
||||
|
||||
nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
|
||||
if (!nb_pkts)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < nb_pkts; i++) {
|
||||
err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
|
||||
if (unlikely(err)) {
|
||||
xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
|
||||
break;
|
||||
}
|
||||
|
||||
kick = true;
|
||||
}
|
||||
|
||||
if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
|
||||
(*kicks)++;
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
|
||||
int budget)
|
||||
{
|
||||
struct virtnet_info *vi = sq->vq->vdev->priv;
|
||||
struct virtnet_sq_free_stats stats = {};
|
||||
struct net_device *dev = vi->dev;
|
||||
u64 kicks = 0;
|
||||
int sent;
|
||||
|
||||
/* Avoid to wakeup napi meanless, so call __free_old_xmit instead of
|
||||
* free_old_xmit().
|
||||
*/
|
||||
__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
|
||||
|
||||
if (stats.xsk)
|
||||
xsk_tx_completed(sq->xsk_pool, stats.xsk);
|
||||
|
||||
sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
|
||||
|
||||
if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
|
||||
check_sq_full_and_disable(vi, vi->dev, sq);
|
||||
|
||||
if (sent) {
|
||||
struct netdev_queue *txq;
|
||||
|
||||
txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
|
||||
txq_trans_cond_update(txq);
|
||||
}
|
||||
|
||||
u64_stats_update_begin(&sq->stats.syncp);
|
||||
u64_stats_add(&sq->stats.packets, stats.packets);
|
||||
u64_stats_add(&sq->stats.bytes, stats.bytes);
|
||||
u64_stats_add(&sq->stats.kicks, kicks);
|
||||
u64_stats_add(&sq->stats.xdp_tx, sent);
|
||||
u64_stats_update_end(&sq->stats.syncp);
|
||||
|
||||
if (xsk_uses_need_wakeup(pool))
|
||||
xsk_set_tx_need_wakeup(pool);
|
||||
|
||||
return sent;
|
||||
}
|
||||
|
||||
static void xsk_wakeup(struct send_queue *sq)
|
||||
{
|
||||
if (napi_if_scheduled_mark_missed(&sq->napi))
|
||||
return;
|
||||
|
||||
local_bh_disable();
|
||||
virtqueue_napi_schedule(&sq->napi, sq->vq);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
|
||||
{
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
@ -1400,16 +1552,21 @@ static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
|
||||
|
||||
sq = &vi->sq[qid];
|
||||
|
||||
if (napi_if_scheduled_mark_missed(&sq->napi))
|
||||
return 0;
|
||||
|
||||
local_bh_disable();
|
||||
virtqueue_napi_schedule(&sq->napi, sq->vq);
|
||||
local_bh_enable();
|
||||
|
||||
xsk_wakeup(sq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void virtnet_xsk_completed(struct send_queue *sq, int num)
|
||||
{
|
||||
xsk_tx_completed(sq->xsk_pool, num);
|
||||
|
||||
/* If this is called by rx poll, start_xmit and xdp xmit we should
|
||||
* wakeup the tx napi to consume the xsk tx queue, because the tx
|
||||
* interrupt may not be triggered.
|
||||
*/
|
||||
xsk_wakeup(sq);
|
||||
}
|
||||
|
||||
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
|
||||
struct send_queue *sq,
|
||||
struct xdp_frame *xdpf)
|
||||
@ -1450,8 +1607,7 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
|
||||
skb_frag_size(frag), skb_frag_off(frag));
|
||||
}
|
||||
|
||||
err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
|
||||
xdp_to_ptr(xdpf), GFP_ATOMIC);
|
||||
err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
|
||||
if (unlikely(err))
|
||||
return -ENOSPC; /* Caller handle free/refcnt */
|
||||
|
||||
@ -1524,8 +1680,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
|
||||
}
|
||||
|
||||
/* Free up any pending old buffers before queueing new ones. */
|
||||
__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
|
||||
false, &stats);
|
||||
virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
|
||||
false, &stats);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct xdp_frame *xdpf = frames[i];
|
||||
@ -2453,7 +2609,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
|
||||
|
||||
virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
|
||||
|
||||
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
|
||||
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
|
||||
if (err < 0) {
|
||||
virtnet_rq_unmap(rq, buf, 0);
|
||||
put_page(virt_to_head_page(buf));
|
||||
@ -2573,7 +2729,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
|
||||
virtnet_rq_init_one_sg(rq, buf, len);
|
||||
|
||||
ctx = mergeable_len_to_ctx(len + room, headroom);
|
||||
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
|
||||
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
|
||||
if (err < 0) {
|
||||
virtnet_rq_unmap(rq, buf, 0);
|
||||
put_page(virt_to_head_page(buf));
|
||||
@ -2982,7 +3138,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
|
||||
struct virtnet_info *vi = sq->vq->vdev->priv;
|
||||
unsigned int index = vq2txq(sq->vq);
|
||||
struct netdev_queue *txq;
|
||||
int opaque;
|
||||
int opaque, xsk_done = 0;
|
||||
bool done;
|
||||
|
||||
if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
|
||||
@ -2994,7 +3150,11 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
|
||||
txq = netdev_get_tx_queue(vi->dev, index);
|
||||
__netif_tx_lock(txq, raw_smp_processor_id());
|
||||
virtqueue_disable_cb(sq->vq);
|
||||
free_old_xmit(sq, txq, !!budget);
|
||||
|
||||
if (sq->xsk_pool)
|
||||
xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
|
||||
else
|
||||
free_old_xmit(sq, txq, !!budget);
|
||||
|
||||
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
|
||||
if (netif_tx_queue_stopped(txq)) {
|
||||
@ -3005,6 +3165,11 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
|
||||
netif_tx_wake_queue(txq);
|
||||
}
|
||||
|
||||
if (xsk_done >= budget) {
|
||||
__netif_tx_unlock(txq);
|
||||
return budget;
|
||||
}
|
||||
|
||||
opaque = virtqueue_enable_cb_prepare(sq->vq);
|
||||
|
||||
done = napi_complete_done(napi, 0);
|
||||
@ -3072,8 +3237,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
|
||||
return num_sg;
|
||||
num_sg++;
|
||||
}
|
||||
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
|
||||
skb_to_ptr(skb, orphan), GFP_ATOMIC);
|
||||
|
||||
return virtnet_add_outbuf(sq, num_sg, skb,
|
||||
orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
|
||||
}
|
||||
|
||||
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
@ -5078,7 +5244,7 @@ static int virtnet_set_coalesce(struct net_device *dev,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
int ret, queue_number, napi_weight;
|
||||
int ret, queue_number, napi_weight, i;
|
||||
bool update_napi = false;
|
||||
|
||||
/* Can't change NAPI weight if the link is up */
|
||||
@ -5107,6 +5273,14 @@ static int virtnet_set_coalesce(struct net_device *dev,
|
||||
return ret;
|
||||
|
||||
if (update_napi) {
|
||||
/* xsk xmit depends on the tx napi. So if xsk is active,
|
||||
* prevent modifications to tx napi.
|
||||
*/
|
||||
for (i = queue_number; i < vi->max_queue_pairs; i++) {
|
||||
if (vi->sq[i].xsk_pool)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
for (; queue_number < vi->max_queue_pairs; queue_number++)
|
||||
vi->sq[queue_number].napi.weight = napi_weight;
|
||||
}
|
||||
@ -5555,6 +5729,29 @@ unreg:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
|
||||
struct send_queue *sq,
|
||||
struct xsk_buff_pool *pool)
|
||||
{
|
||||
int err, qindex;
|
||||
|
||||
qindex = sq - vi->sq;
|
||||
|
||||
virtnet_tx_pause(vi, sq);
|
||||
|
||||
err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
|
||||
if (err) {
|
||||
netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
|
||||
pool = NULL;
|
||||
}
|
||||
|
||||
sq->xsk_pool = pool;
|
||||
|
||||
virtnet_tx_resume(vi, sq);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int virtnet_xsk_pool_enable(struct net_device *dev,
|
||||
struct xsk_buff_pool *pool,
|
||||
u16 qid)
|
||||
@ -5563,6 +5760,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
|
||||
struct receive_queue *rq;
|
||||
struct device *dma_dev;
|
||||
struct send_queue *sq;
|
||||
dma_addr_t hdr_dma;
|
||||
int err, size;
|
||||
|
||||
if (vi->hdr_len > xsk_pool_get_headroom(pool))
|
||||
@ -5600,6 +5798,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
|
||||
if (!rq->xsk_buffs)
|
||||
return -ENOMEM;
|
||||
|
||||
hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
|
||||
DMA_TO_DEVICE, 0);
|
||||
if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
|
||||
return -ENOMEM;
|
||||
|
||||
err = xsk_pool_dma_map(pool, dma_dev, 0);
|
||||
if (err)
|
||||
goto err_xsk_map;
|
||||
@ -5608,11 +5811,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
|
||||
if (err)
|
||||
goto err_rq;
|
||||
|
||||
err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
|
||||
if (err)
|
||||
goto err_sq;
|
||||
|
||||
/* Now, we do not support tx offload(such as tx csum), so all the tx
|
||||
* virtnet hdr is zero. So all the tx packets can share a single hdr.
|
||||
*/
|
||||
sq->xsk_hdr_dma_addr = hdr_dma;
|
||||
|
||||
return 0;
|
||||
|
||||
err_sq:
|
||||
virtnet_rq_bind_xsk_pool(vi, rq, NULL);
|
||||
err_rq:
|
||||
xsk_pool_dma_unmap(pool, 0);
|
||||
err_xsk_map:
|
||||
virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
|
||||
DMA_TO_DEVICE, 0);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -5621,19 +5837,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
struct xsk_buff_pool *pool;
|
||||
struct receive_queue *rq;
|
||||
struct send_queue *sq;
|
||||
int err;
|
||||
|
||||
if (qid >= vi->curr_queue_pairs)
|
||||
return -EINVAL;
|
||||
|
||||
sq = &vi->sq[qid];
|
||||
rq = &vi->rq[qid];
|
||||
|
||||
pool = rq->xsk_pool;
|
||||
|
||||
err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
|
||||
err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
|
||||
|
||||
xsk_pool_dma_unmap(pool, 0);
|
||||
|
||||
virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
|
||||
vi->hdr_len, DMA_TO_DEVICE, 0);
|
||||
kvfree(rq->xsk_buffs);
|
||||
|
||||
return err;
|
||||
@ -5991,10 +6212,26 @@ static void free_receive_page_frags(struct virtnet_info *vi)
|
||||
|
||||
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
|
||||
{
|
||||
if (!is_xdp_frame(buf))
|
||||
struct virtnet_info *vi = vq->vdev->priv;
|
||||
struct send_queue *sq;
|
||||
int i = vq2rxq(vq);
|
||||
|
||||
sq = &vi->sq[i];
|
||||
|
||||
switch (virtnet_xmit_ptr_unpack(&buf)) {
|
||||
case VIRTNET_XMIT_TYPE_SKB:
|
||||
case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
|
||||
dev_kfree_skb(buf);
|
||||
else
|
||||
xdp_return_frame(ptr_to_xdp(buf));
|
||||
break;
|
||||
|
||||
case VIRTNET_XMIT_TYPE_XDP:
|
||||
xdp_return_frame(buf);
|
||||
break;
|
||||
|
||||
case VIRTNET_XMIT_TYPE_XSK:
|
||||
xsk_tx_completed(sq->xsk_pool, 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void free_unused_bufs(struct virtnet_info *vi)
|
||||
@ -6168,15 +6405,6 @@ err_ctrl:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void virtnet_rq_set_premapped(struct virtnet_info *vi)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vi->max_queue_pairs; i++)
|
||||
/* error should never happen */
|
||||
BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
|
||||
}
|
||||
|
||||
static int init_vqs(struct virtnet_info *vi)
|
||||
{
|
||||
int ret;
|
||||
@ -6190,10 +6418,6 @@ static int init_vqs(struct virtnet_info *vi)
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
/* disable for big mode */
|
||||
if (!vi->big_packets || vi->mergeable_rx_bufs)
|
||||
virtnet_rq_set_premapped(vi);
|
||||
|
||||
cpus_read_lock();
|
||||
virtnet_set_affinity(vi);
|
||||
cpus_read_unlock();
|
||||
@ -6455,7 +6679,8 @@ static int virtnet_probe(struct virtio_device *vdev)
|
||||
dev->hw_features |= NETIF_F_GRO_HW;
|
||||
|
||||
dev->vlan_features = dev->features;
|
||||
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
|
||||
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
|
||||
NETDEV_XDP_ACT_XSK_ZEROCOPY;
|
||||
|
||||
/* MTU range: 68 - 65535 */
|
||||
dev->min_mtu = MIN_MTU;
|
||||
|
@ -69,12 +69,20 @@
|
||||
|
||||
struct vring_desc_state_split {
|
||||
void *data; /* Data for callback. */
|
||||
struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
|
||||
|
||||
/* Indirect desc table and extra table, if any. These two will be
|
||||
* allocated together. So we won't stress more to the memory allocator.
|
||||
*/
|
||||
struct vring_desc *indir_desc;
|
||||
};
|
||||
|
||||
struct vring_desc_state_packed {
|
||||
void *data; /* Data for callback. */
|
||||
struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
|
||||
|
||||
/* Indirect desc table and extra table, if any. These two will be
|
||||
* allocated together. So we won't stress more to the memory allocator.
|
||||
*/
|
||||
struct vring_packed_desc *indir_desc;
|
||||
u16 num; /* Descriptor list length. */
|
||||
u16 last; /* The last desc state in a list. */
|
||||
};
|
||||
@ -172,14 +180,6 @@ struct vring_virtqueue {
|
||||
/* Host publishes avail event idx */
|
||||
bool event;
|
||||
|
||||
/* Do DMA mapping by driver */
|
||||
bool premapped;
|
||||
|
||||
/* Do unmap or not for desc. Just when premapped is False and
|
||||
* use_dma_api is true, this is true.
|
||||
*/
|
||||
bool do_unmap;
|
||||
|
||||
/* Head of free buffer list. */
|
||||
unsigned int free_head;
|
||||
/* Number we've added since last sync. */
|
||||
@ -297,6 +297,12 @@ static bool vring_use_dma_api(const struct virtio_device *vdev)
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring,
|
||||
const struct vring_desc_extra *extra)
|
||||
{
|
||||
return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR);
|
||||
}
|
||||
|
||||
size_t virtio_max_dma_size(const struct virtio_device *vdev)
|
||||
{
|
||||
size_t max_segment_size = SIZE_MAX;
|
||||
@ -364,13 +370,17 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
|
||||
|
||||
/* Map one sg entry. */
|
||||
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
|
||||
enum dma_data_direction direction, dma_addr_t *addr)
|
||||
enum dma_data_direction direction, dma_addr_t *addr,
|
||||
u32 *len, bool premapped)
|
||||
{
|
||||
if (vq->premapped) {
|
||||
if (premapped) {
|
||||
*addr = sg_dma_address(sg);
|
||||
*len = sg_dma_len(sg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
*len = sg->length;
|
||||
|
||||
if (!vq->use_dma_api) {
|
||||
/*
|
||||
* If DMA is not used, KMSAN doesn't know that the scatterlist
|
||||
@ -440,61 +450,44 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
|
||||
* Split ring specific functions - *_split().
|
||||
*/
|
||||
|
||||
static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
|
||||
const struct vring_desc *desc)
|
||||
{
|
||||
u16 flags;
|
||||
|
||||
if (!vq->do_unmap)
|
||||
return;
|
||||
|
||||
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
|
||||
|
||||
dma_unmap_page(vring_dma_dev(vq),
|
||||
virtio64_to_cpu(vq->vq.vdev, desc->addr),
|
||||
virtio32_to_cpu(vq->vq.vdev, desc->len),
|
||||
(flags & VRING_DESC_F_WRITE) ?
|
||||
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
|
||||
unsigned int i)
|
||||
struct vring_desc_extra *extra)
|
||||
{
|
||||
struct vring_desc_extra *extra = vq->split.desc_extra;
|
||||
u16 flags;
|
||||
|
||||
flags = extra[i].flags;
|
||||
flags = extra->flags;
|
||||
|
||||
if (flags & VRING_DESC_F_INDIRECT) {
|
||||
if (!vq->use_dma_api)
|
||||
goto out;
|
||||
|
||||
dma_unmap_single(vring_dma_dev(vq),
|
||||
extra[i].addr,
|
||||
extra[i].len,
|
||||
extra->addr,
|
||||
extra->len,
|
||||
(flags & VRING_DESC_F_WRITE) ?
|
||||
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
} else {
|
||||
if (!vq->do_unmap)
|
||||
if (!vring_need_unmap_buffer(vq, extra))
|
||||
goto out;
|
||||
|
||||
dma_unmap_page(vring_dma_dev(vq),
|
||||
extra[i].addr,
|
||||
extra[i].len,
|
||||
extra->addr,
|
||||
extra->len,
|
||||
(flags & VRING_DESC_F_WRITE) ?
|
||||
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
out:
|
||||
return extra[i].next;
|
||||
return extra->next;
|
||||
}
|
||||
|
||||
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
|
||||
unsigned int total_sg,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_desc_extra *extra;
|
||||
struct vring_desc *desc;
|
||||
unsigned int i;
|
||||
unsigned int i, size;
|
||||
|
||||
/*
|
||||
* We require lowmem mappings for the descriptors because
|
||||
@ -503,40 +496,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
|
||||
*/
|
||||
gfp &= ~__GFP_HIGHMEM;
|
||||
|
||||
desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
|
||||
size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
|
||||
|
||||
desc = kmalloc(size, gfp);
|
||||
if (!desc)
|
||||
return NULL;
|
||||
|
||||
extra = (struct vring_desc_extra *)&desc[total_sg];
|
||||
|
||||
for (i = 0; i < total_sg; i++)
|
||||
desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
|
||||
extra[i].next = i + 1;
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
||||
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
|
||||
struct vring_desc *desc,
|
||||
struct vring_desc_extra *extra,
|
||||
unsigned int i,
|
||||
dma_addr_t addr,
|
||||
unsigned int len,
|
||||
u16 flags,
|
||||
bool indirect)
|
||||
u16 flags, bool premapped)
|
||||
{
|
||||
struct vring_virtqueue *vring = to_vvq(vq);
|
||||
struct vring_desc_extra *extra = vring->split.desc_extra;
|
||||
u16 next;
|
||||
|
||||
desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
|
||||
desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
|
||||
desc[i].len = cpu_to_virtio32(vq->vdev, len);
|
||||
|
||||
if (!indirect) {
|
||||
next = extra[i].next;
|
||||
desc[i].next = cpu_to_virtio16(vq->vdev, next);
|
||||
extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
|
||||
extra[i].len = len;
|
||||
extra[i].flags = flags;
|
||||
|
||||
extra[i].addr = addr;
|
||||
extra[i].len = len;
|
||||
extra[i].flags = flags;
|
||||
} else
|
||||
next = virtio16_to_cpu(vq->vdev, desc[i].next);
|
||||
next = extra[i].next;
|
||||
|
||||
desc[i].next = cpu_to_virtio16(vq->vdev, next);
|
||||
|
||||
return next;
|
||||
}
|
||||
@ -548,9 +542,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
||||
unsigned int in_sgs,
|
||||
void *data,
|
||||
void *ctx,
|
||||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
struct vring_desc_extra *extra;
|
||||
struct scatterlist *sg;
|
||||
struct vring_desc *desc;
|
||||
unsigned int i, n, avail, descs_used, prev, err_idx;
|
||||
@ -586,9 +582,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
||||
/* Set up rest to use this indirect table. */
|
||||
i = 0;
|
||||
descs_used = 1;
|
||||
extra = (struct vring_desc_extra *)&desc[total_sg];
|
||||
} else {
|
||||
indirect = false;
|
||||
desc = vq->split.vring.desc;
|
||||
extra = vq->split.desc_extra;
|
||||
i = head;
|
||||
descs_used = total_sg;
|
||||
}
|
||||
@ -610,40 +608,41 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
||||
for (n = 0; n < out_sgs; n++) {
|
||||
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
|
||||
dma_addr_t addr;
|
||||
u32 len;
|
||||
|
||||
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
|
||||
if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
prev = i;
|
||||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
|
||||
i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT,
|
||||
indirect);
|
||||
premapped);
|
||||
}
|
||||
}
|
||||
for (; n < (out_sgs + in_sgs); n++) {
|
||||
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
|
||||
dma_addr_t addr;
|
||||
u32 len;
|
||||
|
||||
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
|
||||
if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
prev = i;
|
||||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(_vq, desc, i, addr,
|
||||
sg->length,
|
||||
i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT |
|
||||
VRING_DESC_F_WRITE,
|
||||
indirect);
|
||||
premapped);
|
||||
}
|
||||
}
|
||||
/* Last one doesn't continue. */
|
||||
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
|
||||
if (!indirect && vq->do_unmap)
|
||||
if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
|
||||
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
|
||||
~VRING_DESC_F_NEXT;
|
||||
|
||||
@ -652,18 +651,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
||||
dma_addr_t addr = vring_map_single(
|
||||
vq, desc, total_sg * sizeof(struct vring_desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (vring_mapping_error(vq, addr)) {
|
||||
if (vq->premapped)
|
||||
goto free_indirect;
|
||||
|
||||
if (vring_mapping_error(vq, addr))
|
||||
goto unmap_release;
|
||||
}
|
||||
|
||||
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
|
||||
vq->split.desc_extra,
|
||||
head, addr,
|
||||
total_sg * sizeof(struct vring_desc),
|
||||
VRING_DESC_F_INDIRECT,
|
||||
false);
|
||||
VRING_DESC_F_INDIRECT, false);
|
||||
}
|
||||
|
||||
/* We're using some buffers from the free list. */
|
||||
@ -716,14 +711,10 @@ unmap_release:
|
||||
for (n = 0; n < total_sg; n++) {
|
||||
if (i == err_idx)
|
||||
break;
|
||||
if (indirect) {
|
||||
vring_unmap_one_split_indirect(vq, &desc[i]);
|
||||
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
|
||||
} else
|
||||
i = vring_unmap_one_split(vq, i);
|
||||
|
||||
i = vring_unmap_one_split(vq, &extra[i]);
|
||||
}
|
||||
|
||||
free_indirect:
|
||||
if (indirect)
|
||||
kfree(desc);
|
||||
|
||||
@ -765,22 +756,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
|
||||
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
|
||||
void **ctx)
|
||||
{
|
||||
struct vring_desc_extra *extra;
|
||||
unsigned int i, j;
|
||||
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
|
||||
|
||||
/* Clear data ptr. */
|
||||
vq->split.desc_state[head].data = NULL;
|
||||
|
||||
extra = vq->split.desc_extra;
|
||||
|
||||
/* Put back on free list: unmap first-level descriptors and find end */
|
||||
i = head;
|
||||
|
||||
while (vq->split.vring.desc[i].flags & nextflag) {
|
||||
vring_unmap_one_split(vq, i);
|
||||
vring_unmap_one_split(vq, &extra[i]);
|
||||
i = vq->split.desc_extra[i].next;
|
||||
vq->vq.num_free++;
|
||||
}
|
||||
|
||||
vring_unmap_one_split(vq, i);
|
||||
vring_unmap_one_split(vq, &extra[i]);
|
||||
vq->split.desc_extra[i].next = vq->free_head;
|
||||
vq->free_head = head;
|
||||
|
||||
@ -790,21 +784,24 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
|
||||
if (vq->indirect) {
|
||||
struct vring_desc *indir_desc =
|
||||
vq->split.desc_state[head].indir_desc;
|
||||
u32 len;
|
||||
u32 len, num;
|
||||
|
||||
/* Free the indirect table, if any, now that it's unmapped. */
|
||||
if (!indir_desc)
|
||||
return;
|
||||
|
||||
len = vq->split.desc_extra[head].len;
|
||||
|
||||
BUG_ON(!(vq->split.desc_extra[head].flags &
|
||||
VRING_DESC_F_INDIRECT));
|
||||
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
|
||||
|
||||
if (vq->do_unmap) {
|
||||
for (j = 0; j < len / sizeof(struct vring_desc); j++)
|
||||
vring_unmap_one_split_indirect(vq, &indir_desc[j]);
|
||||
num = len / sizeof(struct vring_desc);
|
||||
|
||||
extra = (struct vring_desc_extra *)&indir_desc[num];
|
||||
|
||||
if (vq->use_dma_api) {
|
||||
for (j = 0; j < num; j++)
|
||||
vring_unmap_one_split(vq, &extra[j]);
|
||||
}
|
||||
|
||||
kfree(indir_desc);
|
||||
@ -1236,7 +1233,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
|
||||
(flags & VRING_DESC_F_WRITE) ?
|
||||
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
} else {
|
||||
if (!vq->do_unmap)
|
||||
if (!vring_need_unmap_buffer(vq, extra))
|
||||
return;
|
||||
|
||||
dma_unmap_page(vring_dma_dev(vq),
|
||||
@ -1246,27 +1243,12 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
|
||||
}
|
||||
}
|
||||
|
||||
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
|
||||
const struct vring_packed_desc *desc)
|
||||
{
|
||||
u16 flags;
|
||||
|
||||
if (!vq->do_unmap)
|
||||
return;
|
||||
|
||||
flags = le16_to_cpu(desc->flags);
|
||||
|
||||
dma_unmap_page(vring_dma_dev(vq),
|
||||
le64_to_cpu(desc->addr),
|
||||
le32_to_cpu(desc->len),
|
||||
(flags & VRING_DESC_F_WRITE) ?
|
||||
DMA_FROM_DEVICE : DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_desc_extra *extra;
|
||||
struct vring_packed_desc *desc;
|
||||
int i, size;
|
||||
|
||||
/*
|
||||
* We require lowmem mappings for the descriptors because
|
||||
@ -1275,7 +1257,16 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
|
||||
*/
|
||||
gfp &= ~__GFP_HIGHMEM;
|
||||
|
||||
desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
|
||||
size = (sizeof(*desc) + sizeof(*extra)) * total_sg;
|
||||
|
||||
desc = kmalloc(size, gfp);
|
||||
if (!desc)
|
||||
return NULL;
|
||||
|
||||
extra = (struct vring_desc_extra *)&desc[total_sg];
|
||||
|
||||
for (i = 0; i < total_sg; i++)
|
||||
extra[i].next = i + 1;
|
||||
|
||||
return desc;
|
||||
}
|
||||
@ -1286,11 +1277,13 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
|
||||
unsigned int out_sgs,
|
||||
unsigned int in_sgs,
|
||||
void *data,
|
||||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_desc_extra *extra;
|
||||
struct vring_packed_desc *desc;
|
||||
struct scatterlist *sg;
|
||||
unsigned int i, n, err_idx;
|
||||
unsigned int i, n, err_idx, len;
|
||||
u16 head, id;
|
||||
dma_addr_t addr;
|
||||
|
||||
@ -1299,6 +1292,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
|
||||
if (!desc)
|
||||
return -ENOMEM;
|
||||
|
||||
extra = (struct vring_desc_extra *)&desc[total_sg];
|
||||
|
||||
if (unlikely(vq->vq.num_free < 1)) {
|
||||
pr_debug("Can't add buf len 1 - avail = 0\n");
|
||||
kfree(desc);
|
||||
@ -1313,13 +1308,21 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
|
||||
for (n = 0; n < out_sgs + in_sgs; n++) {
|
||||
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
|
||||
if (vring_map_one_sg(vq, sg, n < out_sgs ?
|
||||
DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
|
||||
DMA_TO_DEVICE : DMA_FROM_DEVICE,
|
||||
&addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
desc[i].flags = cpu_to_le16(n < out_sgs ?
|
||||
0 : VRING_DESC_F_WRITE);
|
||||
desc[i].addr = cpu_to_le64(addr);
|
||||
desc[i].len = cpu_to_le32(sg->length);
|
||||
desc[i].len = cpu_to_le32(len);
|
||||
|
||||
if (unlikely(vq->use_dma_api)) {
|
||||
extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
|
||||
extra[i].len = len;
|
||||
extra[i].flags = n < out_sgs ? 0 : VRING_DESC_F_WRITE;
|
||||
}
|
||||
|
||||
i++;
|
||||
}
|
||||
}
|
||||
@ -1328,12 +1331,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
|
||||
addr = vring_map_single(vq, desc,
|
||||
total_sg * sizeof(struct vring_packed_desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (vring_mapping_error(vq, addr)) {
|
||||
if (vq->premapped)
|
||||
goto free_desc;
|
||||
|
||||
if (vring_mapping_error(vq, addr))
|
||||
goto unmap_release;
|
||||
}
|
||||
|
||||
vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
|
||||
vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
|
||||
@ -1389,9 +1388,8 @@ unmap_release:
|
||||
err_idx = i;
|
||||
|
||||
for (i = 0; i < err_idx; i++)
|
||||
vring_unmap_desc_packed(vq, &desc[i]);
|
||||
vring_unmap_extra_packed(vq, &extra[i]);
|
||||
|
||||
free_desc:
|
||||
kfree(desc);
|
||||
|
||||
END_USE(vq);
|
||||
@ -1405,12 +1403,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
||||
unsigned int in_sgs,
|
||||
void *data,
|
||||
void *ctx,
|
||||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
struct vring_packed_desc *desc;
|
||||
struct scatterlist *sg;
|
||||
unsigned int i, n, c, descs_used, err_idx;
|
||||
unsigned int i, n, c, descs_used, err_idx, len;
|
||||
__le16 head_flags, flags;
|
||||
u16 head, id, prev, curr, avail_used_flags;
|
||||
int err;
|
||||
@ -1431,7 +1430,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
||||
|
||||
if (virtqueue_use_indirect(vq, total_sg)) {
|
||||
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
|
||||
in_sgs, data, gfp);
|
||||
in_sgs, data, premapped, gfp);
|
||||
if (err != -ENOMEM) {
|
||||
END_USE(vq);
|
||||
return err;
|
||||
@ -1466,7 +1465,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
||||
dma_addr_t addr;
|
||||
|
||||
if (vring_map_one_sg(vq, sg, n < out_sgs ?
|
||||
DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
|
||||
DMA_TO_DEVICE : DMA_FROM_DEVICE,
|
||||
&addr, &len, premapped))
|
||||
goto unmap_release;
|
||||
|
||||
flags = cpu_to_le16(vq->packed.avail_used_flags |
|
||||
@ -1478,12 +1478,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
||||
desc[i].flags = flags;
|
||||
|
||||
desc[i].addr = cpu_to_le64(addr);
|
||||
desc[i].len = cpu_to_le32(sg->length);
|
||||
desc[i].len = cpu_to_le32(len);
|
||||
desc[i].id = cpu_to_le16(id);
|
||||
|
||||
if (unlikely(vq->use_dma_api)) {
|
||||
vq->packed.desc_extra[curr].addr = addr;
|
||||
vq->packed.desc_extra[curr].len = sg->length;
|
||||
vq->packed.desc_extra[curr].addr = premapped ?
|
||||
DMA_MAPPING_ERROR : addr;
|
||||
vq->packed.desc_extra[curr].len = len;
|
||||
vq->packed.desc_extra[curr].flags =
|
||||
le16_to_cpu(flags);
|
||||
}
|
||||
@ -1625,18 +1626,22 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
|
||||
}
|
||||
|
||||
if (vq->indirect) {
|
||||
u32 len;
|
||||
struct vring_desc_extra *extra;
|
||||
u32 len, num;
|
||||
|
||||
/* Free the indirect table, if any, now that it's unmapped. */
|
||||
desc = state->indir_desc;
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
if (vq->do_unmap) {
|
||||
if (vq->use_dma_api) {
|
||||
len = vq->packed.desc_extra[id].len;
|
||||
for (i = 0; i < len / sizeof(struct vring_packed_desc);
|
||||
i++)
|
||||
vring_unmap_desc_packed(vq, &desc[i]);
|
||||
num = len / sizeof(struct vring_packed_desc);
|
||||
|
||||
extra = (struct vring_desc_extra *)&desc[num];
|
||||
|
||||
for (i = 0; i < num; i++)
|
||||
vring_unmap_extra_packed(vq, &extra[i]);
|
||||
}
|
||||
kfree(desc);
|
||||
state->indir_desc = NULL;
|
||||
@ -2090,8 +2095,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
|
||||
vq->packed_ring = true;
|
||||
vq->dma_dev = dma_dev;
|
||||
vq->use_dma_api = vring_use_dma_api(vdev);
|
||||
vq->premapped = false;
|
||||
vq->do_unmap = vq->use_dma_api;
|
||||
|
||||
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
|
||||
!context;
|
||||
@ -2201,14 +2204,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
|
||||
unsigned int in_sgs,
|
||||
void *data,
|
||||
void *ctx,
|
||||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
|
||||
return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
|
||||
out_sgs, in_sgs, data, ctx, gfp) :
|
||||
out_sgs, in_sgs, data, ctx, premapped, gfp) :
|
||||
virtqueue_add_split(_vq, sgs, total_sg,
|
||||
out_sgs, in_sgs, data, ctx, gfp);
|
||||
out_sgs, in_sgs, data, ctx, premapped, gfp);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2242,7 +2246,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
|
||||
total_sg++;
|
||||
}
|
||||
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
|
||||
data, NULL, gfp);
|
||||
data, NULL, false, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
|
||||
|
||||
@ -2264,10 +2268,33 @@ int virtqueue_add_outbuf(struct virtqueue *vq,
|
||||
void *data,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
|
||||
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
|
||||
|
||||
/**
|
||||
* virtqueue_add_outbuf_premapped - expose output buffers to other end
|
||||
* @vq: the struct virtqueue we're talking about.
|
||||
* @sg: scatterlist (must be well-formed and terminated!)
|
||||
* @num: the number of entries in @sg readable by other side
|
||||
* @data: the token identifying the buffer.
|
||||
* @gfp: how to do memory allocations (if necessary).
|
||||
*
|
||||
* Caller must ensure we don't call this with other virtqueue operations
|
||||
* at the same time (except where noted).
|
||||
*
|
||||
* Return:
|
||||
* Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
|
||||
*/
|
||||
int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
|
||||
struct scatterlist *sg, unsigned int num,
|
||||
void *data,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
|
||||
|
||||
/**
|
||||
* virtqueue_add_inbuf - expose input buffers to other end
|
||||
* @vq: the struct virtqueue we're talking about.
|
||||
@ -2286,7 +2313,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
|
||||
void *data,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
|
||||
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
|
||||
|
||||
@ -2310,10 +2337,35 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
|
||||
void *ctx,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
|
||||
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
|
||||
|
||||
/**
|
||||
* virtqueue_add_inbuf_premapped - expose input buffers to other end
|
||||
* @vq: the struct virtqueue we're talking about.
|
||||
* @sg: scatterlist (must be well-formed and terminated!)
|
||||
* @num: the number of entries in @sg writable by other side
|
||||
* @data: the token identifying the buffer.
|
||||
* @ctx: extra context for the token
|
||||
* @gfp: how to do memory allocations (if necessary).
|
||||
*
|
||||
* Caller must ensure we don't call this with other virtqueue operations
|
||||
* at the same time (except where noted).
|
||||
*
|
||||
* Return:
|
||||
* Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
|
||||
*/
|
||||
int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
|
||||
struct scatterlist *sg, unsigned int num,
|
||||
void *data,
|
||||
void *ctx,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
|
||||
|
||||
/**
|
||||
* virtqueue_dma_dev - get the dma dev
|
||||
* @_vq: the struct virtqueue we're talking about.
|
||||
@ -2635,8 +2687,6 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
|
||||
#endif
|
||||
vq->dma_dev = dma_dev;
|
||||
vq->use_dma_api = vring_use_dma_api(vdev);
|
||||
vq->premapped = false;
|
||||
vq->do_unmap = vq->use_dma_api;
|
||||
|
||||
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
|
||||
!context;
|
||||
@ -2763,50 +2813,6 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_resize);
|
||||
|
||||
/**
|
||||
* virtqueue_set_dma_premapped - set the vring premapped mode
|
||||
* @_vq: the struct virtqueue we're talking about.
|
||||
*
|
||||
* Enable the premapped mode of the vq.
|
||||
*
|
||||
* The vring in premapped mode does not do dma internally, so the driver must
|
||||
* do dma mapping in advance. The driver must pass the dma_address through
|
||||
* dma_address of scatterlist. When the driver got a used buffer from
|
||||
* the vring, it has to unmap the dma address.
|
||||
*
|
||||
* This function must be called immediately after creating the vq, or after vq
|
||||
* reset, and before adding any buffers to it.
|
||||
*
|
||||
* Caller must ensure we don't call this with other virtqueue operations
|
||||
* at the same time (except where noted).
|
||||
*
|
||||
* Returns zero or a negative error.
|
||||
* 0: success.
|
||||
* -EINVAL: too late to enable premapped mode, the vq already contains buffers.
|
||||
*/
|
||||
int virtqueue_set_dma_premapped(struct virtqueue *_vq)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
u32 num;
|
||||
|
||||
START_USE(vq);
|
||||
|
||||
num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
|
||||
|
||||
if (num != vq->vq.num_free) {
|
||||
END_USE(vq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vq->premapped = true;
|
||||
vq->do_unmap = false;
|
||||
|
||||
END_USE(vq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
|
||||
|
||||
/**
|
||||
* virtqueue_reset - detach and recycle all unused buffers
|
||||
* @_vq: the struct virtqueue we're talking about.
|
||||
|
@ -56,6 +56,17 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
|
||||
void *ctx,
|
||||
gfp_t gfp);
|
||||
|
||||
int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
|
||||
struct scatterlist *sg, unsigned int num,
|
||||
void *data,
|
||||
void *ctx,
|
||||
gfp_t gfp);
|
||||
|
||||
int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
|
||||
struct scatterlist *sg, unsigned int num,
|
||||
void *data,
|
||||
gfp_t gfp);
|
||||
|
||||
int virtqueue_add_sgs(struct virtqueue *vq,
|
||||
struct scatterlist *sgs[],
|
||||
unsigned int out_sgs,
|
||||
@ -82,8 +93,6 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
|
||||
|
||||
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
|
||||
|
||||
int virtqueue_set_dma_premapped(struct virtqueue *_vq);
|
||||
|
||||
bool virtqueue_poll(struct virtqueue *vq, unsigned);
|
||||
|
||||
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);