Merge branch 'ravb-cleanups'
Niklas Söderlund says:

====================
ravb: Align Rx descriptor setup and maintenance

When RZ/G2L support was added, the Rx code path was split in two: one to support R-Car and one to support RZ/G2L. One reason for this is that R-Car uses the extended Rx descriptor format, while RZ/G2L uses the normal descriptor format. In many respects this split is not needed, as an extended descriptor is just a normal descriptor with extra metadata (timestamps) appended, and the R-Car SoCs can also use normal descriptors if hardware timestamps are not desired.

This split has led to RZ/G2L gaining support for split descriptors in the Rx path while R-Car still lacks this. This series is a first step in merging the R-Car and RZ/G2L Rx paths, so that features and bug fixes in one will benefit the other.

The first patch in the series clarifies that the driver supports either normal or extended descriptors, not both at the same time, by grouping them in a union. This is the foundation that later patches build on when aligning the two Rx paths.

Patches 2-5 deal with correcting small issues in the Rx frame and descriptor sizes that were either incorrect when added in 2017 (my bad) or concepts built on top of that initially incorrect design. Finally, patch 6 merges the R-Car and RZ/G2L code paths for Rx descriptor setup and maintenance.

When this work has landed I plan to follow up with more work aligning the rest of the Rx code paths, and hopefully bring split descriptor support to the R-Car SoCs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
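[Editor's note: the core trick of the series shows up twice in the diff below: the Rx ring pointer becomes a union of the two descriptor layouts, and common code indexes the ring by a per-SoC byte stride (rx_desc_size) instead of a typed array subscript. The following is a minimal standalone C sketch of that pattern; the struct fields and the rx_get_desc() helper here are simplified stand-ins for illustration, not the driver's real definitions.]

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in layouts: the "extended" descriptor is just the
 * normal one with timestamp metadata appended, mirroring how the ravb
 * hardware formats relate. */
struct rx_desc {
	unsigned short ds_cc;	/* descriptor size / content control */
	unsigned int dptr;	/* buffer address */
};

struct ex_rx_desc {
	struct rx_desc base;
	unsigned int ts_n;	/* timestamp words, extended format only */
	unsigned int ts_sl;
};

/* One ring pointer serves both formats; only one member is ever used
 * at a time, which is exactly what the union makes explicit. */
union rx_ring {
	struct rx_desc *desc;
	struct ex_rx_desc *ex_desc;
	void *raw;
};

/* Walk the ring by a runtime byte stride rather than a typed subscript,
 * so the same code handles either descriptor size. */
static struct rx_desc *rx_get_desc(union rx_ring *ring, size_t desc_size,
				   unsigned int i)
{
	return (struct rx_desc *)((char *)ring->raw + desc_size * i);
}

int main(void)
{
	struct ex_rx_desc ring_storage[4] = { { { 16, 0 }, 0, 0 } };
	union rx_ring ring = { .ex_desc = ring_storage };

	/* A stride of sizeof(struct ex_rx_desc) walks extended entries;
	 * sizeof(struct rx_desc) would walk a normal ring. */
	struct rx_desc *d = rx_get_desc(&ring, sizeof(struct ex_rx_desc), 0);

	printf("entry 0 ds_cc = %hu\n", d->ds_cc);
	return 0;
}

[With rx_desc_size set per SoC in struct ravb_hw_info, one free/format/alloc implementation can walk either ring; that is what ravb_rx_get_desc() does in the diff below.]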
commit 39a096d67c
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1015,11 +1015,6 @@ enum CSR2_BIT {
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
 
-#define RX_BUF_SZ	(2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX	8192
-#define GBETH_RX_DESC_DATA_SIZE	4080
-
 struct ravb_tstamp_skb {
 	struct list_head list;
 	struct sk_buff *skb;
@@ -1044,9 +1039,6 @@ struct ravb_ptp {
 };
 
 struct ravb_hw_info {
-	void (*rx_ring_free)(struct net_device *ndev, int q);
-	void (*rx_ring_format)(struct net_device *ndev, int q);
-	void *(*alloc_rx_desc)(struct net_device *ndev, int q);
 	bool (*receive)(struct net_device *ndev, int *quota, int q);
 	void (*set_rate)(struct net_device *ndev);
 	int (*set_feature)(struct net_device *ndev, netdev_features_t features);
@@ -1057,9 +1049,10 @@ struct ravb_hw_info {
 	netdev_features_t net_hw_features;
 	netdev_features_t net_features;
 	int stats_len;
-	size_t max_rx_len;
 	u32 tccr_mask;
-	u32 rx_max_buf_size;
+	u32 rx_max_frame_size;
+	u32 rx_max_desc_use;
+	u32 rx_desc_size;
 	unsigned aligned_tx: 1;
 
 	/* hardware features */
@@ -1092,8 +1085,11 @@ struct ravb_private {
 	struct ravb_desc *desc_bat;
 	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-	struct ravb_rx_desc *gbeth_rx_ring;
-	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	union {
+		struct ravb_rx_desc *desc;
+		struct ravb_ex_rx_desc *ex_desc;
+		void *raw;
+	} rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
 	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff *rx_1st_skb;
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -113,12 +113,23 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
 	}
 }
 
-static void ravb_set_buffer_align(struct sk_buff *skb)
+static struct sk_buff *
+ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
+	       gfp_t gfp_mask)
 {
-	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+	struct sk_buff *skb;
+	u32 reserve;
+
+	skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
+				 gfp_mask);
+	if (!skb)
+		return NULL;
 
+	reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
 	if (reserve)
 		skb_reserve(skb, RAVB_ALIGN - reserve);
+
+	return skb;
 }
 
 /* Get MAC address from the MAC address registers
@@ -191,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+		 unsigned int i)
+{
+	return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
 /* Free TX skb function for AVB-IP */
 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 {
@@ -235,67 +253,40 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 	return free_num;
 }
 
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->gbeth_rx_ring)
+	if (!priv->rx_ring[q].raw)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+		struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
 			dma_unmap_single(ndev->dev.parent,
 					 le32_to_cpu(desc->dptr),
-					 GBETH_RX_BUFF_MAX,
+					 priv->info->rx_max_frame_size,
 					 DMA_FROM_DEVICE);
 	}
-	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
 			  priv->rx_desc_dma[q]);
-	priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	unsigned int ring_size;
-	unsigned int i;
-
-	if (!priv->rx_ring[q])
-		return;
-
-	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
-
-		if (!dma_mapping_error(ndev->dev.parent,
-				       le32_to_cpu(desc->dptr)))
-			dma_unmap_single(ndev->dev.parent,
-					 le32_to_cpu(desc->dptr),
-					 RX_BUF_SZ,
-					 DMA_FROM_DEVICE);
-	}
-	ring_size = sizeof(struct ravb_ex_rx_desc) *
-		    (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
-			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q] = NULL;
+	priv->rx_ring[q].raw = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	const struct ravb_hw_info *info = priv->info;
 	unsigned int num_tx_desc = priv->num_tx_desc;
 	unsigned int ring_size;
 	unsigned int i;
 
-	info->rx_ring_free(ndev, q);
+	ravb_rx_ring_free(ndev, q);
 
 	if (priv->tx_ring[q]) {
 		ravb_tx_free(ndev, q, false);
@@ -326,7 +317,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	priv->tx_skb[q] = NULL;
 }
 
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct ravb_rx_desc *rx_desc;
@@ -334,15 +325,15 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 	dma_addr_t dma_addr;
 	unsigned int i;
 
-	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+	rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+	memset(priv->rx_ring[q].raw, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->gbeth_rx_ring[i];
-		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+		rx_desc = ravb_rx_get_desc(priv, q, i);
+		rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  GBETH_RX_BUFF_MAX,
+					  priv->info->rx_max_frame_size,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -352,37 +343,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->gbeth_rx_ring[i];
-	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
-	rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_ex_rx_desc *rx_desc;
-	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	dma_addr_t dma_addr;
-	unsigned int i;
-
-	memset(priv->rx_ring[q], 0, rx_ring_size);
-	/* Build RX ring buffer */
-	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
-		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  RX_BUF_SZ,
-					  DMA_FROM_DEVICE);
-		/* We just set the data size to 0 for a failed mapping which
-		 * should prevent DMA from happening...
-		 */
-		if (dma_mapping_error(ndev->dev.parent, dma_addr))
-			rx_desc->ds_cc = cpu_to_le16(0);
-		rx_desc->dptr = cpu_to_le32(dma_addr);
-		rx_desc->die_dt = DT_FEMPTY;
-	}
-	rx_desc = &priv->rx_ring[q][i];
+	rx_desc = ravb_rx_get_desc(priv, q, i);
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -391,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	const struct ravb_hw_info *info = priv->info;
 	unsigned int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
@@ -404,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	priv->dirty_rx[q] = 0;
 	priv->dirty_tx[q] = 0;
 
-	info->rx_ring_format(ndev, q);
+	ravb_rx_ring_format(ndev, q);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
@@ -430,30 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 }
 
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	unsigned int ring_size;
 
-	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+	ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
 
-	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-						 &priv->rx_desc_dma[q],
-						 GFP_KERNEL);
-	return priv->gbeth_rx_ring;
-}
+	priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+						  &priv->rx_desc_dma[q],
+						  GFP_KERNEL);
 
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	unsigned int ring_size;
-
-	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-
-	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-					      &priv->rx_desc_dma[q],
-					      GFP_KERNEL);
-	return priv->rx_ring[q];
+	return priv->rx_ring[q].raw;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -475,10 +423,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
+		skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
 		if (!skb)
 			goto error;
-		ravb_set_buffer_align(skb);
 		priv->rx_skb[q][i] = skb;
 	}
 
@@ -491,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	}
 
 	/* Allocate all RX descriptors. */
-	if (!info->alloc_rx_desc(ndev, q))
+	if (!ravb_alloc_rx_desc(ndev, q))
 		goto error;
 
 	priv->dirty_rx[q] = 0;
@@ -557,7 +504,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
 	}
 
 	/* Receive frame limit set register */
-	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
 
 	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
 	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -618,6 +565,7 @@ static void ravb_emac_init(struct net_device *ndev)
 
 static int ravb_dmac_init_gbeth(struct net_device *ndev)
 {
+	struct ravb_private *priv = netdev_priv(ndev);
 	int error;
 
 	error = ravb_ring_init(ndev, RAVB_BE);
@@ -631,7 +579,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
 	ravb_write(ndev, 0x60000000, RCR);
 
 	/* Set Max Frame Length (RTC) */
-	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+	ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
 
 	/* Set FIFO size */
 	ravb_write(ndev, 0x00222200, TGC);
@@ -804,7 +752,8 @@ static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
 	skb = priv->rx_skb[RAVB_BE][entry];
 	priv->rx_skb[RAVB_BE][entry] = NULL;
 	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+			 ALIGN(priv->info->rx_max_frame_size, 16),
+			 DMA_FROM_DEVICE);
 
 	return skb;
 }
@@ -830,7 +779,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->gbeth_rx_ring[entry];
+	desc = &priv->rx_ring[q].desc[entry];
 	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -901,23 +850,22 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
-		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+		desc = &priv->rx_ring[q].desc[entry];
+		desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 
 		if (!priv->rx_skb[q][entry]) {
-			skb = netdev_alloc_skb(ndev, info->max_rx_len);
+			skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
 			if (!skb)
 				break;
-			ravb_set_buffer_align(skb);
 			dma_addr = dma_map_single(ndev->dev.parent,
 						  skb->data,
-						  GBETH_RX_BUFF_MAX,
+						  priv->info->rx_max_frame_size,
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
 			/* We just set the data size to 0 for a failed mapping
@@ -957,7 +905,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
-	desc = &priv->rx_ring[q][entry];
+	desc = &priv->rx_ring[q].ex_desc[entry];
 	while (desc->die_dt != DT_FEMPTY) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -991,7 +939,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 RX_BUF_SZ,
+					 priv->info->rx_max_frame_size,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -1017,20 +965,19 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+		desc = &priv->rx_ring[q].ex_desc[entry];
+		desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
 
 		if (!priv->rx_skb[q][entry]) {
-			skb = netdev_alloc_skb(ndev, info->max_rx_len);
+			skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
 			if (!skb)
 				break;	/* Better luck next round. */
-			ravb_set_buffer_align(skb);
 			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
@@ -2668,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
 }
 
 static const struct ravb_hw_info ravb_gen3_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2681,9 +2625,10 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
 	.net_hw_features = NETIF_F_RXCSUM,
 	.net_features = NETIF_F_RXCSUM,
 	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
-	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
-	.rx_max_buf_size = SZ_2K,
+	.rx_max_frame_size = SZ_2K,
+	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.internal_delay = 1,
 	.tx_counters = 1,
 	.multi_irqs = 1,
@@ -2694,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
 };
 
 static const struct ravb_hw_info ravb_gen2_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2707,9 +2649,10 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
 	.net_hw_features = NETIF_F_RXCSUM,
 	.net_features = NETIF_F_RXCSUM,
 	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
-	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
-	.rx_max_buf_size = SZ_2K,
+	.rx_max_frame_size = SZ_2K,
+	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.aligned_tx = 1,
 	.gptp = 1,
 	.nc_queues = 1,
@@ -2717,9 +2660,6 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
 };
 
 static const struct ravb_hw_info ravb_rzv2m_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_rcar,
-	.rx_ring_format = ravb_rx_ring_format_rcar,
-	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
 	.receive = ravb_rx_rcar,
 	.set_rate = ravb_set_rate_rcar,
 	.set_feature = ravb_set_features_rcar,
@@ -2730,9 +2670,10 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
 	.net_hw_features = NETIF_F_RXCSUM,
 	.net_features = NETIF_F_RXCSUM,
 	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
-	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
 	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
-	.rx_max_buf_size = SZ_2K,
+	.rx_max_frame_size = SZ_2K,
+	.rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+	.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
 	.multi_irqs = 1,
 	.err_mgmt_irqs = 1,
 	.gptp = 1,
@@ -2742,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
 };
 
 static const struct ravb_hw_info gbeth_hw_info = {
-	.rx_ring_free = ravb_rx_ring_free_gbeth,
-	.rx_ring_format = ravb_rx_ring_format_gbeth,
-	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
 	.receive = ravb_rx_gbeth,
 	.set_rate = ravb_set_rate_gbeth,
 	.set_feature = ravb_set_features_gbeth,
@@ -2755,9 +2693,10 @@ static const struct ravb_hw_info gbeth_hw_info = {
 	.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
 	.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
 	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
-	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
 	.tccr_mask = TCCR_TSRQ0,
-	.rx_max_buf_size = SZ_8K,
+	.rx_max_frame_size = SZ_8K,
+	.rx_max_desc_use = 4080,
+	.rx_desc_size = sizeof(struct ravb_rx_desc),
 	.aligned_tx = 1,
 	.tx_counters = 1,
 	.carrier_counters = 1,
@@ -2966,7 +2905,8 @@ static int ravb_probe(struct platform_device *pdev)
 	priv->avb_link_active_low =
 		of_property_read_bool(np, "renesas,ether-link-active-low");
 
-	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+	ndev->max_mtu = info->rx_max_frame_size -
+		(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
 	ndev->min_mtu = ETH_MIN_MTU;
 
 	/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer