Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 09:12:07 +00:00)
net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq
Replace the bh safe variant with the hard irq safe variant.

We need a hard irq safe variant to deal with netpoll transmitting packets from hard irq context, and we need it in most if not all of the places using the bh safe variant.

Except on 32bit uni-processor the code is exactly the same so don't bother with a bh variant, just have a hard irq safe variant that everyone can use.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 85dcce7a73
commit 57a7744e09
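Every call site below follows the same read-side pattern; what follows is a minimal sketch of that loop after this change. The pcpu_stats structure and field names are illustrative only, not taken from any one driver in the diff:

/* Illustrative only: a per-cpu counter pair protected by u64_stats_sync. */
struct pcpu_stats {
    u64 packets;
    u64 bytes;
    struct u64_stats_sync syncp;
};

static void read_stats(const struct pcpu_stats *s, u64 *packets, u64 *bytes)
{
    unsigned int start;

    do {
        /* Was u64_stats_fetch_begin_bh(); the irq variant is also safe
         * against writers running from hard irq context (netpoll). */
        start = u64_stats_fetch_begin_irq(&s->syncp);
        *packets = s->packets;
        *bytes = s->bytes;
    } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}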
@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
 	uint64_t v;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&stat->syncp);
+		start = u64_stats_fetch_begin_irq(&stat->syncp);
 		v = stat->cnt;
-	} while (u64_stats_fetch_retry_bh(&stat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));
 
 	return v;
 }

@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
 	struct blkg_rwstat tmp;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&rwstat->syncp);
+		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
 		tmp = *rwstat;
-	} while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
 
 	return tmp;
 }

@@ -63,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
 
 		dstats = per_cpu_ptr(dev->dstats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&dstats->syncp);
+			start = u64_stats_fetch_begin_irq(&dstats->syncp);
 			tbytes = dstats->tx_bytes;
 			tpackets = dstats->tx_packets;
-		} while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
 		stats->tx_bytes += tbytes;
 		stats->tx_packets += tpackets;
 	}
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
 		/* Convert HW stats into rtnl_link_stats64 stats. */
 		nstat->rx_packets = hwstat->rx_pkts;

@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 		/* Carrier lost counter seems to be broken for some devices */
 		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
 #endif
-	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 
 	return nstat;
 }

@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
 	do {
 		data_src = &hwstat->tx_good_octets;
 		data_dst = data;
-		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
 		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
 			*data_dst++ = *data_src++;
 
-	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 }
 
 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
 		struct be_rx_stats *stats = rx_stats(rxo);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync);
+			start = u64_stats_fetch_begin_irq(&stats->sync);
 			data[base] = stats->rx_bytes;
 			data[base + 1] = stats->rx_pkts;
-		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 
 		for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
 			p = (u8 *)stats + et_rx_stats[i].offset;

@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
 		struct be_tx_stats *stats = tx_stats(txo);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+			start = u64_stats_fetch_begin_irq(&stats->sync_compl);
 			data[base] = stats->tx_compl;
-		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->sync);
+			start = u64_stats_fetch_begin_irq(&stats->sync);
 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
 				p = (u8 *)stats + et_tx_stats[i].offset;
 				data[base + i] =
 					(et_tx_stats[i].size == sizeof(u64)) ?
 						*(u64 *)p : *(u32 *)p;
 			}
-		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 		base += ETHTOOL_TXSTATS_NUM;
 	}
 }

@@ -591,10 +591,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 	for_all_rx_queues(adapter, rxo, i) {
 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
 		do {
-			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
 			pkts = rx_stats(rxo)->rx_pkts;
 			bytes = rx_stats(rxo)->rx_bytes;
-		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
 		stats->rx_packets += pkts;
 		stats->rx_bytes += bytes;
 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;

@@ -605,10 +605,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
 	for_all_tx_queues(adapter, txo, i) {
 		const struct be_tx_stats *tx_stats = tx_stats(txo);
 		do {
-			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
 			pkts = tx_stats(txo)->tx_pkts;
 			bytes = tx_stats(txo)->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
 		stats->tx_packets += pkts;
 		stats->tx_bytes += bytes;
 	}

@@ -1408,15 +1408,15 @@ static void be_eqd_update(struct be_adapter *adapter)
 
 		rxo = &adapter->rx_obj[eqo->idx];
 		do {
-			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
 			rx_pkts = rxo->stats.rx_pkts;
-		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
+		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
 
 		txo = &adapter->tx_obj[eqo->idx];
 		do {
-			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
 			tx_pkts = txo->stats.tx_reqs;
-		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
+		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
 
 
 		/* Skip, if wrapped around or first calculation */
@@ -653,18 +653,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 
 		/* process Tx ring statistics */
 		do {
-			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
 			data[i] = tx_ring->stats.packets;
 			data[i + 1] = tx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
 
 		/* Rx ring is the 2nd half of the queue pair */
 		rx_ring = &tx_ring[1];
 		do {
-			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
 			data[i + 2] = rx_ring->stats.packets;
 			data[i + 3] = rx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
 	}
 	rcu_read_unlock();
 	if (vsi == pf->vsi[pf->lan_vsi]) {

@@ -376,20 +376,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 			continue;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
 			packets = tx_ring->stats.packets;
 			bytes = tx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
 
 		stats->tx_packets += packets;
 		stats->tx_bytes += bytes;
 		rx_ring = &tx_ring[1];
 
 		do {
-			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
 			packets = rx_ring->stats.packets;
 			bytes = rx_ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
 
 		stats->rx_packets += packets;
 		stats->rx_bytes += bytes;

@@ -770,10 +770,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 		p = ACCESS_ONCE(vsi->tx_rings[q]);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 		tx_b += bytes;
 		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;

@@ -782,10 +782,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
 		do {
-			start = u64_stats_fetch_begin_bh(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			packets = p->stats.packets;
 			bytes = p->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 		rx_b += bytes;
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
@@ -2273,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 
 		ring = adapter->tx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
 			data[i] = ring->tx_stats.packets;
 			data[i+1] = ring->tx_stats.bytes;
 			data[i+2] = ring->tx_stats.restart_queue;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
 			restart2 = ring->tx_stats.restart_queue2;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
 		data[i+2] += restart2;
 
 		i += IGB_TX_QUEUE_STATS_LEN;

@@ -2289,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 	for (j = 0; j < adapter->num_rx_queues; j++) {
 		ring = adapter->rx_ring[j];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 			data[i] = ring->rx_stats.packets;
 			data[i+1] = ring->rx_stats.bytes;
 			data[i+2] = ring->rx_stats.drops;
 			data[i+3] = ring->rx_stats.csum_err;
 			data[i+4] = ring->rx_stats.alloc_failed;
-		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 		i += IGB_RX_QUEUE_STATS_LEN;
 	}
 	spin_unlock(&adapter->stats64_lock);

@@ -5168,10 +5168,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 		}
 
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 			_bytes = ring->rx_stats.bytes;
 			_packets = ring->rx_stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}

@@ -5184,10 +5184,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
 			_bytes = ring->tx_stats.bytes;
 			_packets = ring->tx_stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
 		bytes += _bytes;
 		packets += _packets;
 	}
@@ -1128,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 #ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;

@@ -1156,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 #ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;

@@ -7293,10 +7293,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->rx_packets += packets;
 			stats->rx_bytes += bytes;
 		}

@@ -7309,10 +7309,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->tx_packets += packets;
 			stats->tx_bytes += bytes;
 		}

@@ -3337,10 +3337,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		ring = adapter->rx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			bytes = ring->stats.bytes;
 			packets = ring->stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		stats->rx_bytes += bytes;
 		stats->rx_packets += packets;
 	}

@@ -3348,10 +3348,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		ring = adapter->tx_ring[i];
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			bytes = ring->stats.bytes;
 			packets = ring->stats.packets;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		stats->tx_bytes += bytes;
 		stats->tx_packets += packets;
 	}
@@ -508,12 +508,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 
 		cpu_stats = per_cpu_ptr(pp->stats, cpu);
 		do {
-			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			rx_packets = cpu_stats->rx_packets;
 			rx_bytes = cpu_stats->rx_bytes;
 			tx_packets = cpu_stats->tx_packets;
 			tx_bytes = cpu_stats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes += rx_bytes;

@@ -3908,19 +3908,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
 	u64 _bytes, _packets;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
 		_bytes = sky2->rx_stats.bytes;
 		_packets = sky2->rx_stats.packets;
-	} while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
 
 	stats->rx_packets = _packets;
 	stats->rx_bytes = _bytes;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
 		_bytes = sky2->tx_stats.bytes;
 		_packets = sky2->tx_stats.packets;
-	} while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
 
 	stats->tx_packets = _packets;
 	stats->tx_bytes = _bytes;
@@ -3134,12 +3134,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 		u64 packets, bytes, multicast;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&rxstats->syncp);
+			start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 
 			packets = rxstats->rx_frms;
 			multicast = rxstats->rx_mcast;
 			bytes = rxstats->rx_bytes;
-		} while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
 		net_stats->rx_packets += packets;
 		net_stats->rx_bytes += bytes;

@@ -3149,11 +3149,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 		net_stats->rx_dropped += rxstats->rx_dropped;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&txstats->syncp);
+			start = u64_stats_fetch_begin_irq(&txstats->syncp);
 
 			packets = txstats->tx_frms;
 			bytes = txstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
 
 		net_stats->tx_packets += packets;
 		net_stats->tx_bytes += bytes;

@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 
 	/* software stats */
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
 		storage->rx_packets = np->stat_rx_packets;
 		storage->rx_bytes = np->stat_rx_bytes;
 		storage->rx_dropped = np->stat_rx_dropped;
 		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
 
 	do {
-		syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
 		storage->tx_packets = np->stat_tx_packets;
 		storage->tx_bytes = np->stat_tx_bytes;
 		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
 
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	netdev_stats_to_stats64(stats, &dev->stats);
 
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
 		stats->rx_packets = tp->rx_stats.packets;
 		stats->rx_bytes = tp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
 		stats->tx_bytes = tp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 
 	return stats;
 }

@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		rtl8169_rx_missed(dev, ioaddr);
 
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
 		stats->rx_packets = tp->rx_stats.packets;
 		stats->rx_bytes = tp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
 
 	do {
-		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
 		stats->tx_bytes = tp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;
@@ -2068,14 +2068,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
 		cpu_stats = &priv->cpu[i]->stats;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			trx_packets = cpu_stats->rx_packets;
 			ttx_packets = cpu_stats->tx_packets;
 			trx_bytes = cpu_stats->rx_bytes;
 			ttx_bytes = cpu_stats->tx_bytes;
 			trx_errors = cpu_stats->rx_errors;
 			trx_dropped = cpu_stats->rx_dropped;
-		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		rx_packets += trx_packets;
 		tx_packets += ttx_packets;

@@ -2070,16 +2070,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	netdev_stats_to_stats64(stats, &dev->stats);
 
 	do {
-		start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
 		stats->rx_packets = rp->rx_stats.packets;
 		stats->rx_bytes = rp->rx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
 
 	do {
-		start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
 		stats->tx_packets = rp->tx_stats.packets;
 		stats->tx_bytes = rp->tx_stats.bytes;
-	} while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
 
 	return stats;
 }
@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&dp->rsync);
+		start = u64_stats_fetch_begin_irq(&dp->rsync);
 		stats->rx_packets = dp->rx_packets;
 		stats->rx_bytes = dp->rx_bytes;
-	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+	} while (u64_stats_fetch_retry_irq(&dp->rsync, start));
 
 	do {
-		start = u64_stats_fetch_begin_bh(&dp->tsync);
+		start = u64_stats_fetch_begin_irq(&dp->tsync);
 
 		stats->tx_packets = dp->tx_packets;
 		stats->tx_bytes = dp->tx_bytes;
 
-	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+	} while (u64_stats_fetch_retry_irq(&dp->tsync, start));
 
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;

@@ -111,10 +111,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
 
 		lb_stats = per_cpu_ptr(dev->lstats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
 			tbytes = lb_stats->bytes;
 			tpackets = lb_stats->packets;
-		} while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
 		bytes += tbytes;
 		packets += tpackets;
 	}

@@ -582,13 +582,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
 		for_each_possible_cpu(i) {
 			p = per_cpu_ptr(vlan->pcpu_stats, i);
 			do {
-				start = u64_stats_fetch_begin_bh(&p->syncp);
+				start = u64_stats_fetch_begin_irq(&p->syncp);
 				rx_packets = p->rx_packets;
 				rx_bytes = p->rx_bytes;
 				rx_multicast = p->rx_multicast;
 				tx_packets = p->tx_packets;
 				tx_bytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 			stats->rx_packets += rx_packets;
 			stats->rx_bytes += rx_bytes;
@@ -90,10 +90,10 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		nl_stats = per_cpu_ptr(dev->lstats, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
 			tbytes = nl_stats->bytes;
 			tpackets = nl_stats->packets;
-		} while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
 
 		packets += tpackets;
 		bytes += tbytes;

@@ -1761,13 +1761,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	for_each_possible_cpu(i) {
 		p = per_cpu_ptr(team->pcpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			rx_packets = p->rx_packets;
 			rx_bytes = p->rx_bytes;
 			rx_multicast = p->rx_multicast;
 			tx_packets = p->tx_packets;
 			tx_bytes = p->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes += rx_bytes;

@@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
 	struct lb_stats tmp;
 
 	do {
-		start = u64_stats_fetch_begin_bh(syncp);
+		start = u64_stats_fetch_begin_irq(syncp);
 		tmp.tx_bytes = cpu_stats->tx_bytes;
-	} while (u64_stats_fetch_retry_bh(syncp, start));
+	} while (u64_stats_fetch_retry_irq(syncp, start));
 	acc_stats->tx_bytes += tmp.tx_bytes;
 }
 

@@ -156,10 +156,10 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->syncp);
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			packets = stats->packets;
 			bytes = stats->bytes;
-		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 		result->packets += packets;
 		result->bytes += bytes;
 	}
@@ -1000,16 +1000,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
 		u64 tpackets, tbytes, rpackets, rbytes;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
+			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
 			tpackets = stats->tx_packets;
 			tbytes = stats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
+			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
 			rpackets = stats->rx_packets;
 			rbytes = stats->rx_bytes;
-		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
 
 		tot->rx_packets += rpackets;
 		tot->tx_packets += tpackets;

@@ -1060,13 +1060,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&stats->syncp);
+			start = u64_stats_fetch_begin_irq(&stats->syncp);
 
 			rx_packets = stats->rx_packets;
 			tx_packets = stats->tx_packets;
 			rx_bytes = stats->rx_bytes;
 			tx_bytes = stats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;
@@ -27,8 +27,8 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- * u64_stats_fetch_retry_bh() helpers
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ * u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
 *

@@ -114,31 +114,31 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-	local_bh_disable();
+	local_irq_disable();
 #endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-	local_bh_enable();
+	local_irq_enable();
 #endif
 	return false;
 #endif
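On 32-bit UP there is no seqcount, so the reader's only protection is keeping the writer off the CPU; once netpoll can drive the writer from hard irq context, disabling BH alone no longer prevents a torn 64-bit read. A hedged sketch of how the (unchanged) writer side, u64_stats_update_begin()/u64_stats_update_end(), pairs with the new reader helpers, reusing the illustrative pcpu_stats layout from the sketch near the top:

/* Writer, e.g. on a tx completion path that netpoll may now run from hard irq. */
static void example_tx_done(struct pcpu_stats *s, unsigned int len)
{
    u64_stats_update_begin(&s->syncp);
    s->packets++;
    s->bytes += len;
    u64_stats_update_end(&s->syncp);
}

On 64-bit and on 32-bit SMP the new reader helpers compile to the same code as the old bh ones; only 32-bit UP readers trade local_bh_disable()/local_bh_enable() for local_irq_disable()/local_irq_enable(), as the hunk above shows.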
@@ -678,13 +678,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 
 			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
 			do {
-				start = u64_stats_fetch_begin_bh(&p->syncp);
+				start = u64_stats_fetch_begin_irq(&p->syncp);
 				rxpackets = p->rx_packets;
 				rxbytes = p->rx_bytes;
 				rxmulticast = p->rx_multicast;
 				txpackets = p->tx_packets;
 				txbytes = p->tx_bytes;
-			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 			stats->rx_packets += rxpackets;
 			stats->rx_bytes += rxbytes;

@@ -136,9 +136,9 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
 		const struct pcpu_sw_netstats *bstats
 			= per_cpu_ptr(br->stats, cpu);
 		do {
-			start = u64_stats_fetch_begin_bh(&bstats->syncp);
+			start = u64_stats_fetch_begin_irq(&bstats->syncp);
 			memcpy(&tmp, bstats, sizeof(tmp));
-		} while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
 		sum.tx_bytes += tmp.tx_bytes;
 		sum.tx_packets += tmp.tx_packets;
 		sum.rx_bytes += tmp.rx_bytes;

@@ -1505,9 +1505,9 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
-			start = u64_stats_fetch_begin_bh(syncp);
+			start = u64_stats_fetch_begin_irq(syncp);
 			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_bh(syncp, start));
+		} while (u64_stats_fetch_retry_irq(syncp, start));
 
 		res += v;
 	}
@@ -161,12 +161,12 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			rx_packets = tstats->rx_packets;
 			tx_packets = tstats->tx_packets;
 			rx_bytes = tstats->rx_bytes;
 			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;

@@ -108,12 +108,12 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 			per_cpu_ptr(dev->tstats, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			tmp.rx_packets = tstats->rx_packets;
 			tmp.rx_bytes = tstats->rx_bytes;
 			tmp.tx_packets = tstats->tx_packets;
 			tmp.tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 
 		sum.rx_packets += tmp.rx_packets;
 		sum.rx_bytes += tmp.rx_bytes;

@@ -2177,10 +2177,10 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
 		__u64 inbytes, outbytes;
 
 		do {
-			start = u64_stats_fetch_begin_bh(&u->syncp);
+			start = u64_stats_fetch_begin_irq(&u->syncp);
 			inbytes = u->ustats.inbytes;
 			outbytes = u->ustats.outbytes;
-		} while (u64_stats_fetch_retry_bh(&u->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&u->syncp, start));
 
 		seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
 			   i, u->ustats.conns, u->ustats.inpkts,
@@ -606,9 +606,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
 		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
 		stats->n_hit += local_stats.n_hit;
 		stats->n_missed += local_stats.n_missed;

@@ -277,9 +277,9 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
 
 		do {
-			start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
+			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
 			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
 		stats->rx_bytes += local_stats.rx_bytes;
 		stats->rx_packets += local_stats.rx_packets;