mv643xx_eth: split out rx queue state
Split all RX queue related state into 'struct rx_queue', in preparation
for multiple RX queue support.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
commit 8a578111e3
parent 69876569bb
@@ -73,13 +73,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define MAX_DESCS_PER_SKB	1
 #endif
 
-#define ETH_VLAN_HLEN		4
-#define ETH_FCS_LEN		4
-#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
-#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
-					ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
-					dma_get_cache_alignment())
+#define ETH_HW_IP_ALIGN		2
 
 /*
  * Registers shared between all ports.
@@ -288,22 +282,32 @@ struct mib_counters {
 	u32 late_collision;
 };
 
+struct rx_queue {
+	int rx_ring_size;
+
+	int rx_desc_count;
+	int rx_curr_desc;
+	int rx_used_desc;
+
+	struct rx_desc *rx_desc_area;
+	dma_addr_t rx_desc_dma;
+	int rx_desc_area_size;
+	struct sk_buff **rx_skb;
+
+	struct timer_list rx_oom;
+};
+
 struct mv643xx_eth_private {
 	struct mv643xx_eth_shared_private *shared;
 	int port_num;			/* User Ethernet port number */
 
 	struct mv643xx_eth_shared_private *shared_smi;
 
-	u32 rx_sram_addr;		/* Base address of rx sram area */
-	u32 rx_sram_size;		/* Size of rx sram area */
 	u32 tx_sram_addr;		/* Base address of tx sram area */
 	u32 tx_sram_size;		/* Size of tx sram area */
 
 	/* Tx/Rx rings managment indexes fields. For driver use */
 
-	/* Next available and first returning Rx resource */
-	int rx_curr_desc, rx_used_desc;
-
 	/* Next available and first returning Tx resource */
 	int tx_curr_desc, tx_used_desc;
 
@@ -311,11 +315,6 @@ struct mv643xx_eth_private {
 	u32 tx_clean_threshold;
 #endif
 
-	struct rx_desc *rx_desc_area;
-	dma_addr_t rx_desc_dma;
-	int rx_desc_area_size;
-	struct sk_buff **rx_skb;
-
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
@@ -324,27 +323,25 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;
 
 	struct net_device *dev;
-	struct napi_struct napi;
 	struct mib_counters mib_counters;
 	spinlock_t lock;
 	/* Size of Tx Ring per queue */
 	int tx_ring_size;
 	/* Number of tx descriptors in use */
 	int tx_desc_count;
-	/* Size of Rx Ring per queue */
-	int rx_ring_size;
-	/* Number of rx descriptors in use */
-	int rx_desc_count;
-
-	/*
-	 * Used in case RX Ring is empty, which can be caused when
-	 * system does not have resources (skb's)
-	 */
-	struct timer_list timeout;
 
 	u32 rx_int_coal;
 	u32 tx_int_coal;
 	struct mii_if_info mii;
+
+	/*
+	 * RX state.
+	 */
+	int default_rx_ring_size;
+	unsigned long rx_desc_sram_addr;
+	int rx_desc_sram_size;
+	struct napi_struct napi;
+	struct rx_queue rxq[1];
 };
 
 
@@ -361,32 +358,27 @@ static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 
 
 /* rxq/txq helper functions *************************************************/
-static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
-					unsigned int queues)
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 {
-	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
+	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
 }
 
-static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
+static void rxq_enable(struct rx_queue *rxq)
 {
-	unsigned int port_num = mp->port_num;
-	u32 queues;
-
-	/* Stop Rx port activity. Check port Rx activity. */
-	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
-	if (queues) {
-		/* Issue stop command for active queues only */
-		wrl(mp, RXQ_COMMAND(port_num), (queues << 8));
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
+}
 
-		/* Wait for all Rx activity to terminate. */
-		/* Check port cause register that all Rx queues are stopped */
-		while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
-			udelay(10);
-	}
+static void rxq_disable(struct rx_queue *rxq)
+{
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	u8 mask = 1;
 
-	return queues;
-}
+	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
+	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
+		udelay(10);
+}
 
 static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
 					unsigned int queues)
 {
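The new rxq_to_mp() helper works because each rx_queue is embedded inside mv643xx_eth_private (for now as the one-element array rxq[1]): container_of() subtracts the member's offset from the queue pointer to recover the enclosing struct. Below is a minimal standalone sketch of that pattern; struct port and struct queue are hypothetical stand-ins, not the driver's types.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: member pointer -> container pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue {
	int ring_size;
};

struct port {
	int port_num;
	struct queue rxq[1];	/* embedded, like mp->rxq */
};

int main(void)
{
	struct port p = { .port_num = 3, .rxq = { { .ring_size = 400 } } };
	struct queue *q = &p.rxq[0];

	/* Recover the enclosing port from the queue pointer alone. */
	struct port *owner = container_of(q, struct port, rxq[0]);

	printf("port %d, ring size %d\n", owner->port_num, q->ring_size);
	return 0;
}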
@@ -421,19 +413,29 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
 /* rx ***********************************************************************/
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
 
-static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
+static void rxq_refill(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	unsigned long flags;
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	while (mp->rx_desc_count < mp->rx_ring_size) {
+	while (rxq->rx_desc_count < rxq->rx_ring_size) {
+		int skb_size;
 		struct sk_buff *skb;
 		int unaligned;
 		int rx;
 
-		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
+		/*
+		 * Reserve 2+14 bytes for an ethernet header (the
+		 * hardware automatically prepends 2 bytes of dummy
+		 * data to each received packet), 4 bytes for a VLAN
+		 * header, and 4 bytes for the trailing FCS -- 24
+		 * bytes total.
+		 */
+		skb_size = mp->dev->mtu + 24;
+
+		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
 		if (skb == NULL)
 			break;
 
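The 24-byte constant that replaces the old ETH_RX_SKB_SIZE macro is just the receive-side overhead the new comment enumerates: 2 bytes of hardware prepad, a 14-byte Ethernet header, a 4-byte VLAN header, and a 4-byte FCS. A quick sanity check of that arithmetic; the names below are illustrative, not driver constants.

#include <stdio.h>

enum {
	HW_PREPAD = 2,	/* dummy bytes the controller prepends */
	ETH_HDR   = 14,	/* dest MAC + src MAC + ethertype */
	VLAN_HDR  = 4,
	FCS       = 4,
};

int main(void)
{
	int mtu = 1500;	/* example MTU */
	int overhead = HW_PREPAD + ETH_HDR + VLAN_HDR + FCS;

	/* prints: overhead = 24, skb_size = 1524 */
	printf("overhead = %d, skb_size = %d\n", overhead, mtu + overhead);
	return 0;
}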
@@ -441,44 +443,43 @@ static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 		if (unaligned)
 			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
-		mp->rx_desc_count++;
-		rx = mp->rx_used_desc;
-		mp->rx_used_desc = (rx + 1) % mp->rx_ring_size;
+		rxq->rx_desc_count++;
+		rx = rxq->rx_used_desc;
+		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
 
-		mp->rx_desc_area[rx].buf_ptr = dma_map_single(NULL,
-						skb->data,
-						ETH_RX_SKB_SIZE,
-						DMA_FROM_DEVICE);
-		mp->rx_desc_area[rx].buf_size = ETH_RX_SKB_SIZE;
-		mp->rx_skb[rx] = skb;
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = skb_size;
+		rxq->rx_skb[rx] = skb;
 		wmb();
-		mp->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
 						RX_ENABLE_INTERRUPT;
 		wmb();
 
 		skb_reserve(skb, ETH_HW_IP_ALIGN);
 	}
 
-	if (mp->rx_desc_count == 0) {
-		mp->timeout.expires = jiffies + (HZ / 10);
-		add_timer(&mp->timeout);
+	if (rxq->rx_desc_count == 0) {
+		rxq->rx_oom.expires = jiffies + (HZ / 10);
+		add_timer(&rxq->rx_oom);
 	}
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(unsigned long data)
 {
-	mv643xx_eth_rx_refill_descs((struct net_device *)data);
+	rxq_refill((struct rx_queue *)data);
 }
 
-static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
+static int rxq_process(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	unsigned int received_packets = 0;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	struct net_device_stats *stats = &mp->dev->stats;
+	int rx;
 
-	while (budget-- > 0) {
+	rx = 0;
+	while (rx < budget) {
 		struct sk_buff *skb;
 		volatile struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
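rxq_refill() and rxq_process() now share ring state through struct rx_queue: rx_used_desc is where the next empty buffer is parked, rx_curr_desc is the next descriptor to poll for a completed packet, both wrap modulo rx_ring_size, and rx_desc_count tracks how many descriptors currently hold buffers. A toy model of that indexing, under those assumptions and with none of the locking or DMA.

#include <stdio.h>

#define RING_SIZE 8

static int rx_used_desc, rx_curr_desc, rx_desc_count;

static void refill_one(void)	/* like rxq_refill(): hand hw a buffer */
{
	if (rx_desc_count < RING_SIZE) {
		rx_desc_count++;
		rx_used_desc = (rx_used_desc + 1) % RING_SIZE;
	}
}

static void process_one(void)	/* like rxq_process(): reap a packet */
{
	if (rx_desc_count > 0) {
		rx_desc_count--;
		rx_curr_desc = (rx_curr_desc + 1) % RING_SIZE;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		refill_one();
	for (i = 0; i < 3; i++)
		process_one();

	/* used wrapped to 0, curr advanced to 3, 5 buffers outstanding */
	printf("used=%d curr=%d count=%d\n",
	       rx_used_desc, rx_curr_desc, rx_desc_count);
	return 0;
}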
@@ -486,7 +487,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 
 		spin_lock_irqsave(&mp->lock, flags);
 
-		rx_desc = &mp->rx_desc_area[mp->rx_curr_desc];
+		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 
 		cmd_sts = rx_desc->cmd_sts;
 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -495,17 +496,17 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 		}
 		rmb();
 
-		skb = mp->rx_skb[mp->rx_curr_desc];
-		mp->rx_skb[mp->rx_curr_desc] = NULL;
+		skb = rxq->rx_skb[rxq->rx_curr_desc];
+		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 
-		mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size;
+		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
 
 		spin_unlock_irqrestore(&mp->lock, flags);
 
 		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
-				ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
-		mp->rx_desc_count--;
-		received_packets++;
+				mp->dev->mtu + 24, DMA_FROM_DEVICE);
+		rxq->rx_desc_count--;
+		rx++;
 
 		/*
 		 * Update statistics.
@@ -528,7 +529,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 			printk(KERN_ERR
 				"%s: Received packet spread "
 				"on multiple descriptors\n",
-				dev->name);
+				mp->dev->name);
 		}
 		if (cmd_sts & ERROR_SUMMARY)
 			stats->rx_errors++;
@@ -546,48 +547,45 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 				skb->csum = htons(
 					(cmd_sts & 0x0007fff8) >> 3);
 			}
-			skb->protocol = eth_type_trans(skb, dev);
+			skb->protocol = eth_type_trans(skb, mp->dev);
 #ifdef MV643XX_ETH_NAPI
 			netif_receive_skb(skb);
 #else
 			netif_rx(skb);
 #endif
 		}
-		dev->last_rx = jiffies;
+		mp->dev->last_rx = jiffies;
 	}
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
+	rxq_refill(rxq);
 
-	return received_packets;
+	return rx;
 }
 
 #ifdef MV643XX_ETH_NAPI
 static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
-	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
-	struct net_device *dev = mp->dev;
-	unsigned int port_num = mp->port_num;
-	int work_done;
+	struct mv643xx_eth_private *mp;
+	int rx;
+
+	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		mv643xx_eth_free_completed_tx_descs(dev);
+		mv643xx_eth_free_completed_tx_descs(mp->dev);
 		mp->tx_clean_threshold = 0;
 	}
 #endif
 
-	work_done = 0;
-	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
-					!= (u32) mp->rx_used_desc)
-		work_done = mv643xx_eth_receive_queue(dev, budget);
+	rx = rxq_process(mp->rxq, budget);
 
-	if (work_done < budget) {
-		netif_rx_complete(dev, napi);
-		wrl(mp, INT_CAUSE(port_num), 0);
-		wrl(mp, INT_CAUSE_EXT(port_num), 0);
-		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
+	if (rx < budget) {
+		netif_rx_complete(mp->dev, napi);
+		wrl(mp, INT_CAUSE(mp->port_num), 0);
+		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
+		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
 	}
 
-	return work_done;
+	return rx;
 }
 #endif
 
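The reworked mv643xx_eth_poll() follows the usual NAPI contract: consume at most 'budget' packets per poll; if fewer than 'budget' were found, the ring is drained, so the poll is completed and the RX interrupt re-enabled. A userspace model of that control flow, with hypothetical names standing in for the hardware interaction.

#include <stdio.h>

static int pending = 10;	/* packets waiting in the ring */
static int irq_enabled;

static int rxq_process_model(int budget)
{
	int rx = 0;

	while (rx < budget && pending > 0) {
		pending--;
		rx++;
	}
	return rx;
}

static int poll_model(int budget)
{
	int rx = rxq_process_model(budget);

	if (rx < budget)
		irq_enabled = 1;	/* netif_rx_complete() + unmask */
	return rx;
}

int main(void)
{
	int i;

	/* prints rx=4 irq=0, rx=4 irq=0, then rx=2 irq=1 */
	for (i = 0; i < 3; i++)
		printf("poll: rx=%d irq=%d\n", poll_model(4), irq_enabled);
	return 0;
}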
@@ -1252,53 +1250,102 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 
 
 /* rx/tx queue initialisation ***********************************************/
-static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
+static int rxq_init(struct mv643xx_eth_private *mp)
 {
-	volatile struct rx_desc *p_rx_desc;
-	int rx_desc_num = mp->rx_ring_size;
+	struct rx_queue *rxq = mp->rxq;
+	struct rx_desc *rx_desc;
+	int size;
 	int i;
 
-	/* initialize the next_desc_ptr links in the Rx descriptors ring */
-	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
-	for (i = 0; i < rx_desc_num; i++) {
-		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
-			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
-	}
+	rxq->rx_ring_size = mp->default_rx_ring_size;
+
+	rxq->rx_desc_count = 0;
+	rxq->rx_curr_desc = 0;
+	rxq->rx_used_desc = 0;
+
+	size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
+	if (size <= mp->rx_desc_sram_size) {
+		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+						mp->rx_desc_sram_size);
+		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+	} else {
+		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
+							&rxq->rx_desc_dma,
+							GFP_KERNEL);
+	}
+
+	if (rxq->rx_desc_area == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx ring (%d bytes)\n", size);
+		goto out;
+	}
+	memset(rxq->rx_desc_area, 0, size);
 
-	/* Save Rx desc pointer to driver struct. */
-	mp->rx_curr_desc = 0;
-	mp->rx_used_desc = 0;
+	rxq->rx_desc_area_size = size;
+	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
+								GFP_KERNEL);
+	if (rxq->rx_skb == NULL) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "can't allocate rx skb ring\n");
+		goto out_free;
+	}
+
+	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		int nexti = (i + 1) % rxq->rx_ring_size;
+		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+					nexti * sizeof(struct rx_desc);
+	}
+
+	init_timer(&rxq->rx_oom);
+	rxq->rx_oom.data = (unsigned long)rxq;
+	rxq->rx_oom.function = rxq_refill_timer_wrapper;
+
+	return 0;
+
+
+out_free:
+	if (size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
+	else
+		dma_free_coherent(NULL, size,
+				  rxq->rx_desc_area,
+				  rxq->rx_desc_dma);
 
-	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+out:
+	return -ENOMEM;
 }
 
-static void mv643xx_eth_free_rx_rings(struct net_device *dev)
+static void rxq_deinit(struct rx_queue *rxq)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	int curr;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int i;
 
-	/* Stop RX Queues */
-	mv643xx_eth_port_disable_rx(mp);
+	rxq_disable(rxq);
 
-	/* Free preallocated skb's on RX rings */
-	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
-		if (mp->rx_skb[curr]) {
-			dev_kfree_skb(mp->rx_skb[curr]);
-			mp->rx_desc_count--;
+	del_timer_sync(&rxq->rx_oom);
+
+	for (i = 0; i < rxq->rx_ring_size; i++) {
+		if (rxq->rx_skb[i]) {
+			dev_kfree_skb(rxq->rx_skb[i]);
+			rxq->rx_desc_count--;
 		}
 	}
 
-	if (mp->rx_desc_count)
-		printk(KERN_ERR
-			"%s: Error in freeing Rx Ring. %d skb's still"
-			" stuck in RX Ring - ignoring them\n", dev->name,
-			mp->rx_desc_count);
-	/* Free RX ring */
-	if (mp->rx_sram_size)
-		iounmap(mp->rx_desc_area);
+	if (rxq->rx_desc_count) {
+		dev_printk(KERN_ERR, &mp->dev->dev,
+			   "error freeing rx ring -- %d skbs stuck\n",
+			   rxq->rx_desc_count);
+	}
+
+	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, mp->rx_desc_area_size,
-				mp->rx_desc_area, mp->rx_desc_dma);
+		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+				  rxq->rx_desc_area, rxq->rx_desc_dma);
+
+	kfree(rxq->rx_skb);
 }
 
 static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
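The chaining loop moved into rxq_init() is what makes the descriptor ring circular from the controller's point of view: descriptor i carries the bus address of descriptor (i + 1) % rx_ring_size, so the last entry points back at the first. A standalone sketch of that loop with a toy descriptor layout and a made-up base address, not the controller's real register format.

#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t next_desc_ptr;
};

#define RING_SIZE 4

int main(void)
{
	struct desc ring[RING_SIZE];
	uint32_t dma_base = 0x10000000;	/* pretend bus address of ring[0] */
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		int nexti = (i + 1) % RING_SIZE;

		ring[i].next_desc_ptr = dma_base + nexti * sizeof(struct desc);
	}

	/* the final line prints desc 3 -> 0x10000000: the ring wraps */
	for (i = 0; i < RING_SIZE; i++)
		printf("desc %d -> 0x%08x\n", i, (unsigned)ring[i].next_desc_ptr);
	return 0;
}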
@@ -1510,7 +1557,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	}
 #else
 	if (int_cause & INT_RX)
-		mv643xx_eth_receive_queue(dev, INT_MAX);
+		rxq_process(mp->rxq, INT_MAX);
 #endif
 	if (int_cause_ext & INT_EXT_TX)
 		mv643xx_eth_free_completed_tx_descs(dev);
@@ -1544,20 +1591,30 @@ static void phy_reset(struct mv643xx_eth_private *mp)
 static void port_start(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
-	int tx_curr_desc, rx_curr_desc;
 	u32 pscr;
 	struct ethtool_cmd ethtool_cmd;
+	int i;
 
-	/* Assignment of Tx CTRP of given queue */
-	tx_curr_desc = mp->tx_curr_desc;
-	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));
+	/*
+	 * Configure basic link parameters.
+	 */
+	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+		DISABLE_AUTO_NEG_SPEED_GMII |
+		DISABLE_AUTO_NEG_FOR_DUPLEX |
+		DO_NOT_FORCE_LINK_FAIL |
+		SERIAL_PORT_CONTROL_RESERVED;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
+	pscr |= SERIAL_PORT_ENABLE;
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
 
-	/* Assignment of Rx CRDP of given queue */
-	rx_curr_desc = mp->rx_curr_desc;
-	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
-		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));
+	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+	mv643xx_eth_get_settings(dev, &ethtool_cmd);
+	phy_reset(mp);
+	mv643xx_eth_set_settings(dev, &ethtool_cmd);
 
 	/* Add the assigned Ethernet address to the port's address table */
 	uc_addr_set(mp, dev->dev_addr);
@@ -1566,42 +1623,34 @@ static void port_start(struct net_device *dev)
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
 	 * frames to RX queue #0.
 	 */
-	wrl(mp, PORT_CONFIG(port_num), 0x00000000);
+	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
 
 	/*
 	 * Treat BPDUs as normal multicasts, and disable partition mode.
 	 */
-	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);
-
-	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
-
-	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
 
-	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
-		DISABLE_AUTO_NEG_SPEED_GMII |
-		DISABLE_AUTO_NEG_FOR_DUPLEX |
-		DO_NOT_FORCE_LINK_FAIL |
-		SERIAL_PORT_CONTROL_RESERVED;
+	/*
+	 * Enable the receive queue.
+	 */
+	for (i = 0; i < 1; i++) {
+		struct rx_queue *rxq = mp->rxq;
+		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
+		u32 addr;
 
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
+		addr = (u32)rxq->rx_desc_dma;
+		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+		wrl(mp, off, addr);
 
-	pscr |= SERIAL_PORT_ENABLE;
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);
-
-	/* Assign port SDMA configuration */
-	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
+		rxq_enable(rxq);
+	}
+
 
-	/* Enable port Rx. */
-	mv643xx_eth_port_enable_rx(mp, 1);
+	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num),
+		(u32)((struct tx_desc *)mp->tx_desc_dma + mp->tx_curr_desc));
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	wrl(mp, TX_BW_MTU(port_num), 0);
-
-	/* save phy settings across reset */
-	mv643xx_eth_get_settings(dev, &ethtool_cmd);
-	phy_reset(mp);
-	mv643xx_eth_set_settings(dev, &ethtool_cmd);
+	wrl(mp, TX_BW_MTU(mp->port_num), 0);
 }
 
 #ifdef MV643XX_ETH_COAL
@@ -1661,18 +1710,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	port_init(mp);
 
-	memset(&mp->timeout, 0, sizeof(struct timer_list));
-	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
-	mp->timeout.data = (unsigned long)dev;
-
-	/* Allocate RX and TX skb rings */
-	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
-								GFP_KERNEL);
-	if (!mp->rx_skb) {
-		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
-		err = -ENOMEM;
+	err = rxq_init(mp);
+	if (err)
 		goto out_free_irq;
-	}
+	rxq_refill(mp->rxq);
+
 
 	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
 								GFP_KERNEL);
@@ -1706,39 +1748,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	ether_init_tx_desc_ring(mp);
 
-	/* Allocate RX ring */
-	mp->rx_desc_count = 0;
-	size = mp->rx_ring_size * sizeof(struct rx_desc);
-	mp->rx_desc_area_size = size;
-
-	if (mp->rx_sram_size) {
-		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
-					mp->rx_sram_size);
-		mp->rx_desc_dma = mp->rx_sram_addr;
-	} else
-		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
-						&mp->rx_desc_dma,
-						GFP_KERNEL);
-
-	if (!mp->rx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
-			dev->name, size);
-		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
-			dev->name);
-		if (mp->rx_sram_size)
-			iounmap(mp->tx_desc_area);
-		else
-			dma_free_coherent(NULL, mp->tx_desc_area_size,
-					mp->tx_desc_area, mp->tx_desc_dma);
-		err = -ENOMEM;
-		goto out_free_tx_skb;
-	}
-	memset((void *)mp->rx_desc_area, 0, size);
-
-	ether_init_rx_desc_ring(mp);
-
-	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
-
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
 #endif
@@ -1764,7 +1773,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 out_free_tx_skb:
 	kfree(mp->tx_skb);
 out_free_rx_skb:
-	kfree(mp->rx_skb);
+	rxq_deinit(mp->rxq);
 out_free_irq:
 	free_irq(dev->irq, dev);
 
@@ -1777,7 +1786,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	unsigned int reg_data;
 
 	mv643xx_eth_port_disable_tx(mp);
-	mv643xx_eth_port_disable_rx(mp);
+	rxq_disable(mp->rxq);
 
 	/* Clear all MIB counters */
 	clear_mib_counters(mp);
@@ -1809,7 +1818,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 
 	mv643xx_eth_free_tx_rings(dev);
-	mv643xx_eth_free_rx_rings(dev);
+	rxq_deinit(mp->rxq);
 
 	free_irq(dev->irq, dev);
 
@@ -2162,7 +2171,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	/* set default config values */
 	uc_addr_get(mp, dev->dev_addr);
-	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 
 	if (is_valid_ether_addr(pd->mac_addr))
@@ -2171,8 +2179,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	if (pd->phy_addr || pd->force_phy_addr)
 		phy_addr_set(mp, pd->phy_addr);
 
+	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
 	if (pd->rx_queue_size)
-		mp->rx_ring_size = pd->rx_queue_size;
+		mp->default_rx_ring_size = pd->rx_queue_size;
 
 	if (pd->tx_queue_size)
 		mp->tx_ring_size = pd->tx_queue_size;
@@ -2183,8 +2192,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	}
 
 	if (pd->rx_sram_size) {
-		mp->rx_sram_size = pd->rx_sram_size;
-		mp->rx_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_addr = pd->rx_sram_addr;
+		mp->rx_desc_sram_size = pd->rx_sram_size;
 	}
 
 	duplex = pd->duplex;