mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-04 04:06:26 +00:00
Merge branch 'stmmac-next'
Jose Abreu says:

====================
net: stmmac: Improvements for -next

Misc improvements for stmmac.

Patch 1/6, fixes a sparse warning that was introduced by a recent commit in -next.
Patch 2/6, adds the Split Header support, already available in XGMAC cores, to GMAC4+ cores.
Patch 3/6, adds C45 support for MDIO transactions when using XGMAC cores.
Patch 4/6, removes the speed dependency on CBS callbacks so that they can be used in XGMAC cores.
Patch 5/6, reworks the over-engineered stmmac_rx() function so that it's easier to read.
Patch 6/6, implements the UDP Segmentation Offload feature in GMAC4+ cores.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit ca22d6977b
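As a usage note, not part of the series itself: once the last patch lands and the netdev advertises NETIF_F_GSO_UDP_L4, user space can hand the stack one oversized UDP buffer and let the GMAC4+ core segment it. A minimal sketch, assuming a UDP socket and the UDP_SEGMENT socket option from the udpgso work (the fd, buffer, and gso_size of 1400 here are illustrative):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103 /* SOL_UDP option introduced by the udpgso series */
#endif

/* Send one large buffer; the stack (and, with USO, the NIC) splits it
 * into gso_size-byte datagrams. */
static ssize_t send_with_uso(int fd, const void *buf, size_t len,
                             const struct sockaddr_in *dst)
{
        int gso_size = 1400;    /* payload bytes per datagram (example) */

        if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                       &gso_size, sizeof(gso_size)) < 0)
                return -1;

        return sendto(fd, buf, len, 0,
                      (const struct sockaddr *)dst, sizeof(*dst));
}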
@@ -14,6 +14,7 @@
 /* MAC registers */
 #define GMAC_CONFIG 0x00000000
+#define GMAC_EXT_CONFIG 0x00000004
 #define GMAC_PACKET_FILTER 0x00000008
 #define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
 #define GMAC_VLAN_TAG 0x00000050
@@ -188,6 +189,11 @@ enum power_event {
 #define GMAC_CONFIG_TE BIT(1)
 #define GMAC_CONFIG_RE BIT(0)
 
+/* MAC extended config */
+#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
+#define GMAC_CONFIG_HDSMS_SHIFT 20
+#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+
 /* MAC HW features0 bitmap */
 #define GMAC_HW_FEAT_SAVLANINS BIT(27)
 #define GMAC_HW_FEAT_ADDMAC BIT(18)
@@ -211,6 +217,7 @@ enum power_event {
 #define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
 #define GMAC_HW_FEAT_AVSEL BIT(20)
 #define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_FEAT_SPHEN BIT(17)
 #define GMAC_HW_ADDR64 GENMASK(15, 14)
 #define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
 #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
@@ -733,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
 }
 
 static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
-                                    u16 perfect_match, bool is_double)
+                                    __le16 perfect_match, bool is_double)
 {
         void __iomem *ioaddr = hw->pcsr;
@@ -83,9 +83,10 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
         if (unlikely(rdes3 & RDES3_OWN))
                 return dma_own;
 
-        /* Verify rx error by looking at the last segment. */
-        if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+        if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
+                return discard_frame;
+        if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
                 return rx_not_ls;
 
         if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
                 if (unlikely(rdes3 & RDES3_GIANT_PACKET))
@@ -188,7 +189,7 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
 
 static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 {
-        p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+        p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
 
         if (!disable_rx_ic)
                 p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
@@ -492,6 +493,18 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
         p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
 }
 
+static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+        *len = le32_to_cpu(p->des2) & RDES2_HL;
+        return 0;
+}
+
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+        p->des2 = cpu_to_le32(lower_32_bits(addr));
+        p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
+}
+
 const struct stmmac_desc_ops dwmac4_desc_ops = {
         .tx_status = dwmac4_wrback_get_tx_status,
         .rx_status = dwmac4_wrback_get_rx_status,
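A quick illustration of the RDES2_HL extraction that dwmac4_get_rx_header_len() performs above. The descriptor value is assumed for the example; the mask mirrors the RDES2_HL define added to the descriptor header below:

#include <linux/bits.h>
#include <linux/types.h>

static unsigned int example_rx_header_len(void)
{
        u32 rdes2 = 0x04000036;         /* assumed write-back RDES2 value */

        /* Split header length lives in bits 9:0 -> 0x36 = 54 bytes */
        return rdes2 & GENMASK(9, 0);
}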
@@ -519,6 +532,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
         .set_sarc = dwmac4_set_sarc,
         .set_vlan_tag = dwmac4_set_vlan_tag,
         .set_vlan = dwmac4_set_vlan,
+        .get_rx_header_len = dwmac4_get_rx_header_len,
+        .set_sec_addr = dwmac4_set_sec_addr,
 };
 
 const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
@@ -109,6 +109,7 @@
 #define RDES2_L4_FILTER_MATCH BIT(28)
 #define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
 #define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+#define RDES2_HL GENMASK(9, 0)
 
 /* RDES3 (write back format) */
 #define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
@@ -368,6 +368,7 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
         dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
         dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
         dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+        dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
 
         dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
         switch (dma_cap->addr64) {
@@ -460,6 +461,22 @@ static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
         writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
 }
 
+static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+        u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
+
+        value &= ~GMAC_CONFIG_HDSMS;
+        value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+        writel(value, ioaddr + GMAC_EXT_CONFIG);
+
+        value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+        if (en)
+                value |= DMA_CONTROL_SPH;
+        else
+                value &= ~DMA_CONTROL_SPH;
+        writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
         .reset = dwmac4_dma_reset,
         .init = dwmac4_dma_init,
@@ -486,6 +503,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
         .enable_tso = dwmac4_enable_tso,
         .qmode = dwmac4_qmode,
         .set_bfsize = dwmac4_set_bfsize,
+        .enable_sph = dwmac4_enable_sph,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -514,4 +532,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
         .enable_tso = dwmac4_enable_tso,
         .qmode = dwmac4_qmode,
         .set_bfsize = dwmac4_set_bfsize,
+        .enable_sph = dwmac4_enable_sph,
 };
@@ -110,6 +110,7 @@
 #define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
 
 /* DMA Control X */
+#define DMA_CONTROL_SPH BIT(24)
 #define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
 
 /* DMA Tx Channel X Control register defines */
@@ -556,7 +556,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
 }
 
 static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
-                                      u16 perfect_match, bool is_double)
+                                      __le16 perfect_match, bool is_double)
 {
         void __iomem *ioaddr = hw->pcsr;
@@ -357,7 +357,7 @@ struct stmmac_ops {
                             struct stmmac_rss *cfg, u32 num_rxq);
         /* VLAN */
         void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
-                                 u16 perfect_match, bool is_double);
+                                 __le16 perfect_match, bool is_double);
         void (*enable_vlan)(struct mac_device_info *hw, u32 type);
         /* TX Timestamp */
         int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
@@ -36,6 +36,7 @@
 #endif /* CONFIG_DEBUG_FS */
 #include <linux/net_tstamp.h>
 #include <linux/phylink.h>
+#include <linux/udp.h>
 #include <net/pkt_cls.h>
 #include "stmmac_ptp.h"
 #include "stmmac.h"
@@ -2916,9 +2917,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         u32 queue = skb_get_queue_mapping(skb);
         struct stmmac_tx_queue *tx_q;
         unsigned int first_entry;
+        u8 proto_hdr_len, hdr;
         int tmp_pay_len = 0;
         u32 pay_len, mss;
-        u8 proto_hdr_len;
         dma_addr_t des;
         bool has_vlan;
         int i;
@@ -2926,7 +2927,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_q = &priv->tx_queue[queue];
 
         /* Compute header lengths */
-        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+                proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+                hdr = sizeof(struct udphdr);
+        } else {
+                proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+                hdr = tcp_hdrlen(skb);
+        }
 
         /* Desc availability based on threshold should be enough safe */
         if (unlikely(stmmac_tx_avail(priv, queue) <
@@ -2956,8 +2963,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         }
 
         if (netif_msg_tx_queued(priv)) {
-                pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
-                        __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+                pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+                        __func__, hdr, proto_hdr_len, pay_len, mss);
                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
                         skb->data_len);
         }
@@ -3071,7 +3078,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                         proto_hdr_len,
                         pay_len,
                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
-                        tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+                        hdr / 4, (skb->len - proto_hdr_len));
 
         /* If context desc is used to change MSS */
         if (mss_desc) {
@@ -3130,6 +3137,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         int i, csum_insertion = 0, is_jumbo = 0;
         u32 queue = skb_get_queue_mapping(skb);
         int nfrags = skb_shinfo(skb)->nr_frags;
+        int gso = skb_shinfo(skb)->gso_type;
         struct dma_desc *desc, *first;
         struct stmmac_tx_queue *tx_q;
         unsigned int first_entry;
@@ -3145,7 +3153,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
         /* Manage oversized TCP frames for GMAC4 device */
         if (skb_is_gso(skb) && priv->tso) {
-                if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+                if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+                        return stmmac_tso_xmit(skb, dev);
+                if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
                         return stmmac_tso_xmit(skb, dev);
         }
 
@@ -3443,6 +3453,55 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
 }
 
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+                                       struct dma_desc *p,
+                                       int status, unsigned int len)
+{
+        int ret, coe = priv->hw->rx_csum;
+        unsigned int plen = 0, hlen = 0;
+
+        /* Not first descriptor, buffer is always zero */
+        if (priv->sph && len)
+                return 0;
+
+        /* First descriptor, get split header length */
+        ret = stmmac_get_rx_header_len(priv, p, &hlen);
+        if (priv->sph && hlen) {
+                priv->xstats.rx_split_hdr_pkt_n++;
+                return hlen;
+        }
+
+        /* First descriptor, not last descriptor and not split header */
+        if (status & rx_not_ls)
+                return priv->dma_buf_sz;
+
+        plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+        /* First descriptor and last descriptor and not split header */
+        return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+                                       struct dma_desc *p,
+                                       int status, unsigned int len)
+{
+        int coe = priv->hw->rx_csum;
+        unsigned int plen = 0;
+
+        /* Not split header, buffer is not available */
+        if (!priv->sph)
+                return 0;
+
+        /* Not last descriptor */
+        if (status & rx_not_ls)
+                return priv->dma_buf_sz;
+
+        plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+        /* Last descriptor */
+        return plen - len;
+}
+
 /**
  * stmmac_rx - manage the receive process
  * @priv: driver private structure
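A worked example of how the two helpers above compose, with all values assumed for illustration: a 1000-byte frame that fits in one descriptor (last segment), split header enabled, and 54 header bytes written to buffer 1:

#include <assert.h>

int main(void)
{
        unsigned int frame_len = 1000;  /* from the write-back descriptor */
        unsigned int hlen = 54;         /* split header length (RDES2_HL) */
        unsigned int len = 0;

        /* stmmac_rx_buf1_len(): first descriptor with a split header,
         * so only the header bytes come from buffer 1 */
        unsigned int buf1_len = hlen;
        len += buf1_len;

        /* stmmac_rx_buf2_len(): last descriptor, so buffer 2 holds the
         * frame length minus what len already accounts for */
        unsigned int buf2_len = frame_len - len;
        len += buf2_len;

        assert(buf1_len == 54 && buf2_len == 946 && len == frame_len);
        return 0;
}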
@@ -3472,11 +3531,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
         }
         while (count < limit) {
-                unsigned int hlen = 0, prev_len = 0;
+                unsigned int buf1_len = 0, buf2_len = 0;
                 enum pkt_hash_types hash_type;
                 struct stmmac_rx_buffer *buf;
                 struct dma_desc *np, *p;
-                unsigned int sec_len;
                 int entry;
                 u32 hash;
@@ -3495,7 +3553,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         break;
 
 read_again:
-                sec_len = 0;
+                buf1_len = 0;
+                buf2_len = 0;
                 entry = next_entry;
                 buf = &rx_q->buf_pool[entry];
@@ -3520,7 +3579,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                 np = rx_q->dma_rx + next_entry;
 
                 prefetch(np);
-                prefetch(page_address(buf->page));
 
                 if (priv->extend_desc)
                         stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3537,69 +3595,61 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         goto read_again;
                 if (unlikely(error)) {
                         dev_kfree_skb(skb);
+                        skb = NULL;
                         count++;
                         continue;
                 }
 
                 /* Buffer is good. Go on. */
 
-                if (likely(status & rx_not_ls)) {
-                        len += priv->dma_buf_sz;
-                } else {
-                        prev_len = len;
-                        len = stmmac_get_rx_frame_len(priv, p, coe);
+                prefetch(page_address(buf->page));
+                if (buf->sec_page)
+                        prefetch(page_address(buf->sec_page));
 
-                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
-                         * Type frames (LLC/LLC-SNAP)
-                         *
-                         * llc_snap is never checked in GMAC >= 4, so this ACS
-                         * feature is always disabled and packets need to be
-                         * stripped manually.
-                         */
-                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
-                            unlikely(status != llc_snap))
-                                len -= ETH_FCS_LEN;
+                buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+                len += buf1_len;
+                buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+                len += buf2_len;
+
+                /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+                 * Type frames (LLC/LLC-SNAP)
+                 *
+                 * llc_snap is never checked in GMAC >= 4, so this ACS
+                 * feature is always disabled and packets need to be
+                 * stripped manually.
+                 */
+                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+                    unlikely(status != llc_snap)) {
+                        if (buf2_len)
+                                buf2_len -= ETH_FCS_LEN;
+                        else
+                                buf1_len -= ETH_FCS_LEN;
+
+                        len -= ETH_FCS_LEN;
                 }
 
                 if (!skb) {
-                        int ret = stmmac_get_rx_header_len(priv, p, &hlen);
-
-                        if (priv->sph && !ret && (hlen > 0)) {
-                                sec_len = len;
-                                if (!(status & rx_not_ls))
-                                        sec_len = sec_len - hlen;
-                                len = hlen;
-
-                                prefetch(page_address(buf->sec_page));
-                                priv->xstats.rx_split_hdr_pkt_n++;
-                        }
-
-                        skb = napi_alloc_skb(&ch->rx_napi, len);
+                        skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
                         if (!skb) {
                                 priv->dev->stats.rx_dropped++;
                                 count++;
-                                continue;
+                                goto drain_data;
                         }
 
-                        dma_sync_single_for_cpu(priv->device, buf->addr, len,
-                                                DMA_FROM_DEVICE);
+                        dma_sync_single_for_cpu(priv->device, buf->addr,
                                                 buf1_len, DMA_FROM_DEVICE);
                         skb_copy_to_linear_data(skb, page_address(buf->page),
-                                                len);
-                        skb_put(skb, len);
+                                                buf1_len);
+                        skb_put(skb, buf1_len);
 
                         /* Data payload copied into SKB, page ready for recycle */
                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
                         buf->page = NULL;
-                } else {
-                        unsigned int buf_len = len - prev_len;
-
-                        if (likely(status & rx_not_ls))
-                                buf_len = priv->dma_buf_sz;
-
+                } else if (buf1_len) {
                         dma_sync_single_for_cpu(priv->device, buf->addr,
-                                                buf_len, DMA_FROM_DEVICE);
+                                                buf1_len, DMA_FROM_DEVICE);
                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                        buf->page, 0, buf_len,
+                                        buf->page, 0, buf1_len,
                                         priv->dma_buf_sz);
 
                         /* Data payload appended into SKB */
@@ -3607,22 +3657,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         buf->page = NULL;
                 }
 
-                if (sec_len > 0) {
+                if (buf2_len) {
                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
-                                                sec_len, DMA_FROM_DEVICE);
+                                                buf2_len, DMA_FROM_DEVICE);
                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                        buf->sec_page, 0, sec_len,
+                                        buf->sec_page, 0, buf2_len,
                                         priv->dma_buf_sz);
 
-                        len += sec_len;
-
                         /* Data payload appended into SKB */
                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
                         buf->sec_page = NULL;
                 }
 
+drain_data:
                 if (likely(status & rx_not_ls))
                         goto read_again;
+                if (!skb)
+                        continue;
 
                 /* Got entire packet into SKB. Finish it. */
 
@@ -3640,13 +3691,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
                 skb_record_rx_queue(skb, queue);
                 napi_gro_receive(&ch->rx_napi, skb);
+                skb = NULL;
 
                 priv->dev->stats.rx_packets++;
                 priv->dev->stats.rx_bytes += len;
                 count++;
         }
 
-        if (status & rx_not_ls) {
+        if (status & rx_not_ls || skb) {
                 rx_q->state_saved = true;
                 rx_q->state.skb = skb;
                 rx_q->state.error = error;
@@ -3994,11 +4046,13 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev)
 {
-        if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+        int gso = skb_shinfo(skb)->gso_type;
+
+        if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
                 /*
-                 * There is no way to determine the number of TSO
+                 * There is no way to determine the number of TSO/USO
                  * capable Queues. Let's use always the Queue 0
-                 * because if TSO is supported then at least this
+                 * because if TSO/USO is supported then at least this
                  * one will be capable.
                  */
                 return 0;
@@ -4214,6 +4268,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
 {
         u32 crc, hash = 0;
+        __le16 pmatch = 0;
         int count = 0;
         u16 vid = 0;
 
@@ -4228,11 +4283,11 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
                 if (count > 2) /* VID = 0 always passes filter */
                         return -EOPNOTSUPP;
 
-                vid = cpu_to_le16(vid);
+                pmatch = cpu_to_le16(vid);
                 hash = 0;
         }
 
-        return stmmac_update_vlan_hash(priv, priv->hw, hash, vid, is_double);
+        return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
 }
 
 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
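For context on patch 1/6, a minimal sketch of the sparse issue being fixed here: assigning a __le16 value to a plain u16 drops the byte-order annotation, which sparse reports; keeping a dedicated __le16 variable, as pmatch does above, satisfies it.

#include <linux/types.h>
#include <asm/byteorder.h>

static void byteorder_example(u16 vid)
{
        __le16 pmatch = cpu_to_le16(vid);       /* ok: annotations match */

        /* u16 bad = cpu_to_le16(vid); <-- sparse: incorrect type in
         * assignment (different base types) */
        (void)pmatch;
}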
@@ -4512,6 +4567,8 @@ int stmmac_dvr_probe(struct device *device,
 
         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+                if (priv->plat->has_gmac4)
+                        ndev->hw_features |= NETIF_F_GSO_UDP_L4;
                 priv->tso = true;
                 dev_info(priv->device, "TSO feature enabled\n");
         }
@@ -41,20 +41,32 @@
 #define MII_XGMAC_BUSY BIT(22)
 #define MII_XGMAC_MAX_C22ADDR 3
 #define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+#define MII_XGMAC_PA_SHIFT 16
+#define MII_XGMAC_DA_SHIFT 21
+
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+                                    int phyreg, u32 *hw_addr)
+{
+        u32 tmp;
+
+        /* Set port as Clause 45 */
+        tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+        tmp &= ~BIT(phyaddr);
+        writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+        *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
+        *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
+        return 0;
+}
 
 static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
                                     int phyreg, u32 *hw_addr)
 {
-        unsigned int mii_data = priv->hw->mii.data;
         u32 tmp;
 
         /* HW does not support C22 addr >= 4 */
         if (phyaddr > MII_XGMAC_MAX_C22ADDR)
                 return -ENODEV;
-        /* Wait until any existing MII operation is complete */
-        if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
-                               !(tmp & MII_XGMAC_BUSY), 100, 10000))
-                return -EBUSY;
 
         /* Set port as Clause 22 */
         tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
@@ -62,7 +74,7 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
         tmp |= BIT(phyaddr);
         writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
 
-        *hw_addr = (phyaddr << 16) | (phyreg & 0x1f);
+        *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
         return 0;
 }
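For reference, a sketch of how an MDIO caller builds the Clause 45 phyreg that stmmac_xgmac2_c45_format() decodes: the MII_ADDR_C45 flag plus a device address shifted by MII_DEVADDR_C45_SHIFT (constants as defined in linux/phy.h and linux/mdio.h at the time of this series; the devad/register choice below is just an example):

#include <linux/phy.h>
#include <linux/mdio.h>

/* Read PMA/PMD status 1 (devad 1, register 1) over Clause 45 */
static int example_c45_read(struct mii_bus *bus, int phyaddr)
{
        int phyreg = MII_ADDR_C45 |
                     (MDIO_MMD_PMAPMD << MII_DEVADDR_C45_SHIFT) |
                     MDIO_STAT1;

        return mdiobus_read(bus, phyaddr, phyreg);
}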
@@ -75,17 +87,28 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
         u32 tmp, addr, value = MII_XGMAC_BUSY;
         int ret;
 
+        /* Wait until any existing MII operation is complete */
+        if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+                               !(tmp & MII_XGMAC_BUSY), 100, 10000))
+                return -EBUSY;
+
         if (phyreg & MII_ADDR_C45) {
-                return -EOPNOTSUPP;
+                phyreg &= ~MII_ADDR_C45;
+
+                ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+                if (ret)
+                        return ret;
         } else {
                 ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
                 if (ret)
                         return ret;
+
+                value |= MII_XGMAC_SADDR;
         }
 
         value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
                 & priv->hw->mii.clk_csr_mask;
-        value |= MII_XGMAC_SADDR | MII_XGMAC_READ;
+        value |= MII_XGMAC_READ;
 
         /* Wait until any existing MII operation is complete */
         if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
@@ -115,17 +138,28 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
         u32 addr, tmp, value = MII_XGMAC_BUSY;
         int ret;
 
+        /* Wait until any existing MII operation is complete */
+        if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+                               !(tmp & MII_XGMAC_BUSY), 100, 10000))
+                return -EBUSY;
+
         if (phyreg & MII_ADDR_C45) {
-                return -EOPNOTSUPP;
+                phyreg &= ~MII_ADDR_C45;
+
+                ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+                if (ret)
+                        return ret;
         } else {
                 ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
                 if (ret)
                         return ret;
+
+                value |= MII_XGMAC_SADDR;
         }
 
         value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
                 & priv->hw->mii.clk_csr_mask;
-        value |= phydata | MII_XGMAC_SADDR;
+        value |= phydata;
         value |= MII_XGMAC_WRITE;
 
         /* Wait until any existing MII operation is complete */
@@ -363,6 +397,10 @@ int stmmac_mdio_register(struct net_device *ndev)
                 goto bus_register_fail;
         }
 
+        /* Looks like we need a dummy read for XGMAC only and C45 PHYs */
+        if (priv->plat->has_xgmac)
+                stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+
         if (priv->plat->phy_node || mdio_node)
                 goto bus_register_done;
 
@@ -321,8 +321,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
                 return -EINVAL;
         if (!priv->dma_cap.av)
                 return -EOPNOTSUPP;
-        if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
-                return -EOPNOTSUPP;
 
         mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
         if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
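With the speed check gone, the same CBS offload request is accepted on XGMAC links above 1 Gb/s. A sketch of the request this hook receives, per struct tc_cbs_qopt_offload in include/net/pkt_cls.h (the slope and credit numbers are example values, not recommendations):

#include <net/pkt_cls.h>

static void example_cbs_request(struct tc_cbs_qopt_offload *qopt)
{
        qopt->enable = 1;
        qopt->queue = 1;                /* hardware queue to shape */
        qopt->idleslope = 98688;        /* credit gained while blocked, kbit/s */
        qopt->sendslope = -901312;      /* credit spent while sending, kbit/s */
        qopt->hicredit = 153;           /* upper credit bound, bytes */
        qopt->locredit = -1389;         /* lower credit bound, bytes */
}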