mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-08 14:23:19 +00:00
Merge branch 'enic-report-per-queue-stats'
Nelson Escobar says: ==================== enic: Report per queue stats Patch #1: Use a macro instead of static const variables for array sizes. I didn't want to add more static const variables in the next patch so clean up the existing ones first. Patch #2: Collect per queue statistics Patch #3: Report per queue stats in netdev qstats Patch #4: Report some per queue stats in ethtool # NETIF="eno6" tools/testing/selftests/drivers/net/stats.py KTAP version 1 1..5 ok 1 stats.check_pause # XFAIL pause not supported by the device ok 2 stats.check_fec # XFAIL FEC not supported by the device ok 3 stats.pkt_byte_sum ok 4 stats.qstat_by_ifindex ok 5 stats.check_down # tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \ --dump qstats-get --json '{"ifindex": "34"}' [{'ifindex': 34, 'rx-bytes': 66762680, 'rx-csum-unnecessary': 1009345, 'rx-hw-drop-overruns': 0, 'rx-hw-drops': 0, 'rx-packets': 1009673, 'tx-bytes': 137936674899, 'tx-csum-none': 125, 'tx-hw-gso-packets': 2408712, 'tx-needs-csum': 2431531, 'tx-packets': 15475466, 'tx-stop': 0, 'tx-wake': 0}] v2: https://lore.kernel.org/20240905010900.24152-1-neescoba@cisco.com v1: https://lore.kernel.org/20240823235401.29996-1-neescoba@cisco.com ==================== Link: https://patch.msgid.link/20240912005039.10797-1-neescoba@cisco.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
158135dcb4
@ -128,6 +128,40 @@ struct vxlan_offload {
|
||||
u8 flags;
|
||||
};
|
||||
|
||||
/* Per-WQ (transmit queue) statistics, exposed via ethtool -S and the
 * netdev per-queue qstats interface.  Plain u64 counters; NOTE(review):
 * presumably each counter is only updated from its own queue's Tx or
 * completion path (under wq_lock / NAPI), so no atomics — confirm.
 */
struct enic_wq_stats {
	u64 packets;		/* pkts queued for Tx */
	u64 stopped;		/* Tx ring almost full, queue stopped */
	u64 wake;		/* Tx ring no longer full, queue woken up */
	u64 tso;		/* non-encap tso pkt */
	u64 encap_tso;		/* encap tso pkt */
	u64 encap_csum;		/* encap HW csum */
	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
	u64 csum_none;		/* HW csum not required */
	u64 bytes;		/* bytes queued for Tx */
	u64 add_vlan;		/* HW adds vlan tag */
	u64 cq_work;		/* Tx completions processed */
	u64 cq_bytes;		/* Tx bytes processed */
	u64 null_pkt;		/* skb length <= 0 */
	u64 skb_linear_fail;	/* linearize failures */
	u64 desc_full_awake;	/* Tx ring full while queue awake */
};
|
||||
|
||||
/* Per-RQ (receive queue) statistics, exposed via ethtool -S and the
 * netdev per-queue qstats interface.  Plain u64 counters; NOTE(review):
 * presumably only written from the queue's own NAPI poll context — confirm.
 */
struct enic_rq_stats {
	u64 packets;			/* pkts received */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* hashed on l4 */
	u64 l3_rss_hash;		/* hashed on l3 */
	u64 csum_unnecessary;		/* HW verified csum */
	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
	u64 vlan_stripped;		/* HW stripped vlan */
	u64 napi_complete;		/* napi complete intr reenabled */
	u64 napi_repoll;		/* napi poll again */
	u64 bad_fcs;			/* bad pkts */
	u64 pkt_truncated;		/* truncated pkts */
	u64 no_skb;			/* out of skbs */
	u64 desc_skip;			/* Rx pkt went into later buffer */
};
|
||||
|
||||
/* Per-instance private data structure */
|
||||
struct enic {
|
||||
struct net_device *netdev;
|
||||
@ -162,16 +196,16 @@ struct enic {
|
||||
/* work queue cache line section */
|
||||
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
|
||||
spinlock_t wq_lock[ENIC_WQ_MAX];
|
||||
struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
|
||||
unsigned int wq_count;
|
||||
u16 loop_enable;
|
||||
u16 loop_tag;
|
||||
|
||||
/* receive queue cache line section */
|
||||
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
|
||||
struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
|
||||
unsigned int rq_count;
|
||||
struct vxlan_offload vxlan;
|
||||
u64 rq_truncated_pkts;
|
||||
u64 rq_bad_fcs;
|
||||
struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
|
||||
|
||||
/* interrupt resource cache line section */
|
||||
|
@ -32,6 +32,41 @@ struct enic_stat {
|
||||
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
|
||||
}
|
||||
|
||||
#define ENIC_PER_RQ_STAT(stat) { \
|
||||
.name = "rq[%d]_"#stat, \
|
||||
.index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
|
||||
}
|
||||
|
||||
#define ENIC_PER_WQ_STAT(stat) { \
|
||||
.name = "wq[%d]_"#stat, \
|
||||
.index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
|
||||
}
|
||||
|
||||
static const struct enic_stat enic_per_rq_stats[] = {
|
||||
ENIC_PER_RQ_STAT(l4_rss_hash),
|
||||
ENIC_PER_RQ_STAT(l3_rss_hash),
|
||||
ENIC_PER_RQ_STAT(csum_unnecessary_encap),
|
||||
ENIC_PER_RQ_STAT(vlan_stripped),
|
||||
ENIC_PER_RQ_STAT(napi_complete),
|
||||
ENIC_PER_RQ_STAT(napi_repoll),
|
||||
ENIC_PER_RQ_STAT(no_skb),
|
||||
ENIC_PER_RQ_STAT(desc_skip),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
|
||||
|
||||
static const struct enic_stat enic_per_wq_stats[] = {
|
||||
ENIC_PER_WQ_STAT(encap_tso),
|
||||
ENIC_PER_WQ_STAT(encap_csum),
|
||||
ENIC_PER_WQ_STAT(add_vlan),
|
||||
ENIC_PER_WQ_STAT(cq_work),
|
||||
ENIC_PER_WQ_STAT(cq_bytes),
|
||||
ENIC_PER_WQ_STAT(null_pkt),
|
||||
ENIC_PER_WQ_STAT(skb_linear_fail),
|
||||
ENIC_PER_WQ_STAT(desc_full_awake),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
|
||||
static const struct enic_stat enic_tx_stats[] = {
|
||||
ENIC_TX_STAT(tx_frames_ok),
|
||||
ENIC_TX_STAT(tx_unicast_frames_ok),
|
||||
@ -46,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
|
||||
ENIC_TX_STAT(tx_tso),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
|
||||
|
||||
static const struct enic_stat enic_rx_stats[] = {
|
||||
ENIC_RX_STAT(rx_frames_ok),
|
||||
ENIC_RX_STAT(rx_frames_total),
|
||||
@ -70,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
|
||||
ENIC_RX_STAT(rx_frames_to_max),
|
||||
};
|
||||
|
||||
#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
|
||||
|
||||
static const struct enic_stat enic_gen_stats[] = {
|
||||
ENIC_GEN_STAT(dma_map_error),
|
||||
};
|
||||
|
||||
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
|
||||
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
|
||||
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
|
||||
#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
|
||||
|
||||
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
|
||||
{
|
||||
@ -141,22 +178,38 @@ static void enic_get_drvinfo(struct net_device *netdev,
|
||||
static void enic_get_strings(struct net_device *netdev, u32 stringset,
|
||||
u8 *data)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
unsigned int i;
|
||||
unsigned int j;
|
||||
|
||||
switch (stringset) {
|
||||
case ETH_SS_STATS:
|
||||
for (i = 0; i < enic_n_tx_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
|
||||
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic_n_rx_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
|
||||
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic_n_gen_stats; i++) {
|
||||
for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
|
||||
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
|
||||
snprintf(data, ETH_GSTRING_LEN,
|
||||
enic_per_rq_stats[j].name, i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
|
||||
snprintf(data, ETH_GSTRING_LEN,
|
||||
enic_per_wq_stats[j].name, i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -242,9 +295,19 @@ static int enic_set_ringparam(struct net_device *netdev,
|
||||
|
||||
static int enic_get_sset_count(struct net_device *netdev, int sset)
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
unsigned int n_per_rq_stats;
|
||||
unsigned int n_per_wq_stats;
|
||||
unsigned int n_stats;
|
||||
|
||||
switch (sset) {
|
||||
case ETH_SS_STATS:
|
||||
return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
|
||||
n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
|
||||
n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
|
||||
n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
|
||||
NUM_ENIC_GEN_STATS +
|
||||
n_per_rq_stats + n_per_wq_stats;
|
||||
return n_stats;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@ -256,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
struct vnic_stats *vstats;
|
||||
unsigned int i;
|
||||
unsigned int j;
|
||||
int err;
|
||||
|
||||
err = enic_dev_stats_dump(enic, &vstats);
|
||||
@ -266,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
|
||||
if (err == -ENOMEM)
|
||||
return;
|
||||
|
||||
for (i = 0; i < enic_n_tx_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_TX_STATS; i++)
|
||||
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
|
||||
for (i = 0; i < enic_n_rx_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_RX_STATS; i++)
|
||||
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
|
||||
for (i = 0; i < enic_n_gen_stats; i++)
|
||||
for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
|
||||
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
|
||||
for (i = 0; i < enic->rq_count; i++) {
|
||||
struct enic_rq_stats *rqstats = &enic->rq_stats[i];
|
||||
int index;
|
||||
|
||||
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
|
||||
index = enic_per_rq_stats[j].index;
|
||||
*(data++) = ((u64 *)rqstats)[index];
|
||||
}
|
||||
}
|
||||
for (i = 0; i < enic->wq_count; i++) {
|
||||
struct enic_wq_stats *wqstats = &enic->wq_stats[i];
|
||||
int index;
|
||||
|
||||
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
|
||||
index = enic_per_wq_stats[j].index;
|
||||
*(data++) = ((u64 *)wqstats)[index];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static u32 enic_get_msglevel(struct net_device *netdev)
|
||||
|
@ -46,6 +46,7 @@
|
||||
#include <linux/crash_dump.h>
|
||||
#include <net/busy_poll.h>
|
||||
#include <net/vxlan.h>
|
||||
#include <net/netdev_queues.h>
|
||||
|
||||
#include "cq_enet_desc.h"
|
||||
#include "vnic_dev.h"
|
||||
@ -339,6 +340,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
|
||||
/* Tx-completion callback: account the completed buffer against the
 * queue's completion counters, then hand it back to enic_free_wq_buf()
 * for unmapping/freeing.  @cq_desc and @opaque are unused here.
 */
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	enic->wq_stats[wq->index].cq_work++;		/* one more Tx completion */
	enic->wq_stats[wq->index].cq_bytes += buf->len;	/* bytes retired from ring */
	enic_free_wq_buf(wq, buf);
}
|
||||
|
||||
@ -355,8 +360,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
|
||||
|
||||
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
|
||||
vnic_wq_desc_avail(&enic->wq[q_number]) >=
|
||||
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
|
||||
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
|
||||
netif_wake_subqueue(enic->netdev, q_number);
|
||||
enic->wq_stats[q_number].wake++;
|
||||
}
|
||||
|
||||
spin_unlock(&enic->wq_lock[q_number]);
|
||||
|
||||
@ -590,6 +597,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
|
||||
if (!eop)
|
||||
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
|
||||
|
||||
/* The enic_queue_wq_desc() above does not do HW checksum */
|
||||
enic->wq_stats[wq->index].csum_none++;
|
||||
enic->wq_stats[wq->index].packets++;
|
||||
enic->wq_stats[wq->index].bytes += skb->len;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -622,6 +634,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
|
||||
if (!eop)
|
||||
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
|
||||
|
||||
enic->wq_stats[wq->index].csum_partial++;
|
||||
enic->wq_stats[wq->index].packets++;
|
||||
enic->wq_stats[wq->index].bytes += skb->len;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -676,15 +692,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
|
||||
unsigned int offset = 0;
|
||||
unsigned int hdr_len;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned int pkts;
|
||||
unsigned int len;
|
||||
skb_frag_t *frag;
|
||||
|
||||
if (skb->encapsulation) {
|
||||
hdr_len = skb_inner_tcp_all_headers(skb);
|
||||
enic_preload_tcp_csum_encap(skb);
|
||||
enic->wq_stats[wq->index].encap_tso++;
|
||||
} else {
|
||||
hdr_len = skb_tcp_all_headers(skb);
|
||||
enic_preload_tcp_csum(skb);
|
||||
enic->wq_stats[wq->index].tso++;
|
||||
}
|
||||
|
||||
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
|
||||
@ -705,7 +724,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
|
||||
}
|
||||
|
||||
if (eop)
|
||||
return 0;
|
||||
goto tso_out_stats;
|
||||
|
||||
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
|
||||
* for additional data fragments
|
||||
@ -732,6 +751,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
|
||||
}
|
||||
}
|
||||
|
||||
tso_out_stats:
|
||||
/* calculate how many packets tso sent */
|
||||
len = skb->len - hdr_len;
|
||||
pkts = len / mss;
|
||||
if ((len % mss) > 0)
|
||||
pkts++;
|
||||
enic->wq_stats[wq->index].packets += pkts;
|
||||
enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -764,6 +792,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
|
||||
if (!eop)
|
||||
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
|
||||
|
||||
enic->wq_stats[wq->index].encap_csum++;
|
||||
enic->wq_stats[wq->index].packets++;
|
||||
enic->wq_stats[wq->index].bytes += skb->len;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -780,6 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
|
||||
/* VLAN tag from trunking driver */
|
||||
vlan_tag_insert = 1;
|
||||
vlan_tag = skb_vlan_tag_get(skb);
|
||||
enic->wq_stats[wq->index].add_vlan++;
|
||||
} else if (enic->loop_enable) {
|
||||
vlan_tag = enic->loop_tag;
|
||||
loopback = 1;
|
||||
@ -792,7 +825,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
|
||||
else if (skb->encapsulation)
|
||||
err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
|
||||
vlan_tag, loopback);
|
||||
else if (skb->ip_summed == CHECKSUM_PARTIAL)
|
||||
else if (skb->ip_summed == CHECKSUM_PARTIAL)
|
||||
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
|
||||
vlan_tag, loopback);
|
||||
else
|
||||
@ -825,13 +858,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
|
||||
unsigned int txq_map;
|
||||
struct netdev_queue *txq;
|
||||
|
||||
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
|
||||
wq = &enic->wq[txq_map];
|
||||
|
||||
if (skb->len <= 0) {
|
||||
dev_kfree_skb_any(skb);
|
||||
enic->wq_stats[wq->index].null_pkt++;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
|
||||
wq = &enic->wq[txq_map];
|
||||
txq = netdev_get_tx_queue(netdev, txq_map);
|
||||
|
||||
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
|
||||
@ -843,6 +878,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
|
||||
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
|
||||
skb_linearize(skb)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
enic->wq_stats[wq->index].skb_linear_fail++;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
@ -854,14 +890,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
|
||||
/* This is a hard error, log it */
|
||||
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
|
||||
spin_unlock(&enic->wq_lock[txq_map]);
|
||||
enic->wq_stats[wq->index].desc_full_awake++;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
if (enic_queue_wq_skb(enic, wq, skb))
|
||||
goto error;
|
||||
|
||||
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
|
||||
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
|
||||
netif_tx_stop_queue(txq);
|
||||
enic->wq_stats[wq->index].stopped++;
|
||||
}
|
||||
skb_tx_timestamp(skb);
|
||||
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
|
||||
vnic_wq_doorbell(wq);
|
||||
@ -878,7 +917,10 @@ static void enic_get_stats(struct net_device *netdev,
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
struct vnic_stats *stats;
|
||||
u64 pkt_truncated = 0;
|
||||
u64 bad_fcs = 0;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
err = enic_dev_stats_dump(enic, &stats);
|
||||
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
|
||||
@ -897,8 +939,17 @@ static void enic_get_stats(struct net_device *netdev,
|
||||
net_stats->rx_bytes = stats->rx.rx_bytes_ok;
|
||||
net_stats->rx_errors = stats->rx.rx_errors;
|
||||
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
|
||||
net_stats->rx_over_errors = enic->rq_truncated_pkts;
|
||||
net_stats->rx_crc_errors = enic->rq_bad_fcs;
|
||||
|
||||
for (i = 0; i < ENIC_RQ_MAX; i++) {
|
||||
struct enic_rq_stats *rqs = &enic->rq_stats[i];
|
||||
|
||||
if (!enic->rq->ctrl)
|
||||
break;
|
||||
pkt_truncated += rqs->pkt_truncated;
|
||||
bad_fcs += rqs->bad_fcs;
|
||||
}
|
||||
net_stats->rx_over_errors = pkt_truncated;
|
||||
net_stats->rx_crc_errors = bad_fcs;
|
||||
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
|
||||
}
|
||||
|
||||
@ -1261,8 +1312,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
|
||||
return 0;
|
||||
}
|
||||
skb = netdev_alloc_skb_ip_align(netdev, len);
|
||||
if (!skb)
|
||||
if (!skb) {
|
||||
enic->rq_stats[rq->index].no_skb++;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
|
||||
DMA_FROM_DEVICE);
|
||||
@ -1313,6 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
struct net_device *netdev = enic->netdev;
|
||||
struct sk_buff *skb;
|
||||
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
|
||||
struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
|
||||
|
||||
u8 type, color, eop, sop, ingress_port, vlan_stripped;
|
||||
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
|
||||
@ -1323,8 +1377,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
u32 rss_hash;
|
||||
bool outer_csum_ok = true, encap = false;
|
||||
|
||||
if (skipped)
|
||||
rqstats->packets++;
|
||||
if (skipped) {
|
||||
rqstats->desc_skip++;
|
||||
return;
|
||||
}
|
||||
|
||||
skb = buf->os_buf;
|
||||
|
||||
@ -1342,9 +1399,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
|
||||
if (!fcs_ok) {
|
||||
if (bytes_written > 0)
|
||||
enic->rq_bad_fcs++;
|
||||
rqstats->bad_fcs++;
|
||||
else if (bytes_written == 0)
|
||||
enic->rq_truncated_pkts++;
|
||||
rqstats->pkt_truncated++;
|
||||
}
|
||||
|
||||
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
|
||||
@ -1359,7 +1416,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
|
||||
/* Good receive
|
||||
*/
|
||||
|
||||
rqstats->bytes += bytes_written;
|
||||
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
|
||||
buf->os_buf = NULL;
|
||||
dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
|
||||
@ -1377,11 +1434,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
|
||||
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
|
||||
rqstats->l4_rss_hash++;
|
||||
break;
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
|
||||
case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
|
||||
skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
|
||||
rqstats->l3_rss_hash++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1418,11 +1477,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
(ipv4_csum_ok || ipv6)) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
skb->csum_level = encap;
|
||||
if (encap)
|
||||
rqstats->csum_unnecessary_encap++;
|
||||
else
|
||||
rqstats->csum_unnecessary++;
|
||||
}
|
||||
|
||||
if (vlan_stripped)
|
||||
if (vlan_stripped) {
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
||||
|
||||
rqstats->vlan_stripped++;
|
||||
}
|
||||
skb_mark_napi_id(skb, &enic->napi[rq->index]);
|
||||
if (!(netdev->features & NETIF_F_GRO))
|
||||
netif_receive_skb(skb);
|
||||
@ -1435,7 +1499,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
||||
|
||||
/* Buffer overflow
|
||||
*/
|
||||
|
||||
rqstats->pkt_truncated++;
|
||||
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
|
||||
DMA_FROM_DEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
@ -1568,6 +1632,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
|
||||
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
|
||||
enic_set_int_moderation(enic, &enic->rq[0]);
|
||||
vnic_intr_unmask(&enic->intr[intr]);
|
||||
enic->rq_stats[0].napi_complete++;
|
||||
} else {
|
||||
enic->rq_stats[0].napi_repoll++;
|
||||
}
|
||||
|
||||
return rq_work_done;
|
||||
@ -1693,6 +1760,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
|
||||
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
|
||||
enic_set_int_moderation(enic, &enic->rq[rq]);
|
||||
vnic_intr_unmask(&enic->intr[intr]);
|
||||
enic->rq_stats[rq].napi_complete++;
|
||||
} else {
|
||||
enic->rq_stats[rq].napi_repoll++;
|
||||
}
|
||||
|
||||
return work_done;
|
||||
@ -2502,6 +2572,54 @@ static void enic_clear_intr_mode(struct enic *enic)
|
||||
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
|
||||
}
|
||||
|
||||
static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
|
||||
struct netdev_queue_stats_rx *rxs)
|
||||
{
|
||||
struct enic *enic = netdev_priv(dev);
|
||||
struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
|
||||
|
||||
rxs->bytes = rqstats->bytes;
|
||||
rxs->packets = rqstats->packets;
|
||||
rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
|
||||
rxs->hw_drop_overruns = rqstats->pkt_truncated;
|
||||
rxs->csum_unnecessary = rqstats->csum_unnecessary +
|
||||
rqstats->csum_unnecessary_encap;
|
||||
}
|
||||
|
||||
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
|
||||
struct netdev_queue_stats_tx *txs)
|
||||
{
|
||||
struct enic *enic = netdev_priv(dev);
|
||||
struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
|
||||
|
||||
txs->bytes = wqstats->bytes;
|
||||
txs->packets = wqstats->packets;
|
||||
txs->csum_none = wqstats->csum_none;
|
||||
txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
|
||||
wqstats->tso;
|
||||
txs->hw_gso_packets = wqstats->tso;
|
||||
txs->stop = wqstats->stopped;
|
||||
txs->wake = wqstats->wake;
|
||||
}
|
||||
|
||||
/* netdev qstats callback: base (non-per-queue) stats.
 *
 * NOTE(review): the qstats core presumably pre-fills these structs with
 * a "not reported" sentinel, so each field is zeroed individually here
 * to declare "this driver reports this stat, and the device-global
 * contribution beyond the per-queue counters is zero".  Do not replace
 * with a whole-struct zero-initialization: that would also clear fields
 * this driver does not report — confirm against the qstats core.
 */
static void enic_get_base_stats(struct net_device *dev,
				struct netdev_queue_stats_rx *rxs,
				struct netdev_queue_stats_tx *txs)
{
	rxs->bytes = 0;
	rxs->packets = 0;
	rxs->hw_drops = 0;
	rxs->hw_drop_overruns = 0;
	rxs->csum_unnecessary = 0;
	txs->bytes = 0;
	txs->packets = 0;
	txs->csum_none = 0;
	txs->needs_csum = 0;
	txs->hw_gso_packets = 0;
	txs->stop = 0;
	txs->wake = 0;
}
|
||||
|
||||
static const struct net_device_ops enic_netdev_dynamic_ops = {
|
||||
.ndo_open = enic_open,
|
||||
.ndo_stop = enic_stop,
|
||||
@ -2550,6 +2668,12 @@ static const struct net_device_ops enic_netdev_ops = {
|
||||
.ndo_features_check = enic_features_check,
|
||||
};
|
||||
|
||||
static const struct netdev_stat_ops enic_netdev_stat_ops = {
|
||||
.get_queue_stats_rx = enic_get_queue_stats_rx,
|
||||
.get_queue_stats_tx = enic_get_queue_stats_tx,
|
||||
.get_base_stats = enic_get_base_stats,
|
||||
};
|
||||
|
||||
static void enic_dev_deinit(struct enic *enic)
|
||||
{
|
||||
unsigned int i;
|
||||
@ -2892,6 +3016,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
netdev->netdev_ops = &enic_netdev_dynamic_ops;
|
||||
else
|
||||
netdev->netdev_ops = &enic_netdev_ops;
|
||||
netdev->stat_ops = &enic_netdev_stat_ops;
|
||||
|
||||
netdev->watchdog_timeo = 2 * HZ;
|
||||
enic_set_ethtool_ops(netdev);
|
||||
|
Loading…
Reference in New Issue
Block a user