octeontx2-pf: Add basic net_device_ops

Implements a basic set of net_device_ops (ndo_open, ndo_stop and ndo_start_xmit) for the representor netdevs, and makes the shared RX/TX datapath aware of representor mode.
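
A central piece of the change is the otx2_sq_append_skb() calling convention: callers now pass the driver private struct and the TX queue explicitly so the PF, VF and representor transmit paths can share one helper. A condensed sketch of the old vs. new prototype and its callers (names as they appear in the diff below):

    /* old */
    bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
                            struct sk_buff *skb, u16 qidx);

    /* new: caller supplies the driver private struct and the TX queue */
    bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
                            struct otx2_snd_queue *sq,
                            struct sk_buff *skb, u16 qidx);

    /* PF/VF xmit:  otx2_sq_append_skb(pf, txq, sq, skb, qidx)
     * representor: otx2_sq_append_skb(pf, txq, &pf->qset.sq[rep->rep_id],
     *                                 skb, rep->rep_id)
     */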

Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Geetha sowjanya <gakula@marvell.com>
Date:      2024-11-07 21:38:30 +05:30
Committer: David S. Miller
Parent:    3937b7308d
Commit:    22f8587967
5 changed files with 70 additions and 11 deletions


@@ -2116,7 +2116,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &pf->qset.sq[sq_idx];
txq = netdev_get_tx_queue(netdev, qidx);
-	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+	if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */


@@ -376,9 +376,11 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
otx2_set_rxhash(pfvf, cqe, skb);
-	skb_record_rx_queue(skb, cq->cq_idx);
-	if (pfvf->netdev->features & NETIF_F_RXCSUM)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
+		skb_record_rx_queue(skb, cq->cq_idx);
+		if (pfvf->netdev->features & NETIF_F_RXCSUM)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
skb->mark = parse->match_id;
@@ -453,6 +455,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int tx_pkts = 0, tx_bytes = 0, qidx;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
+	struct net_device *ndev;
int processed_cqe = 0;
if (cq->pend_cqe >= budget)
@@ -493,6 +496,13 @@ process_cqe:
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+	if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+		ndev = pfvf->reps[qidx]->netdev;
+	else
+#endif
+		ndev = pfvf->netdev;
if (likely(tx_pkts)) {
struct netdev_queue *txq;
@@ -500,12 +510,14 @@ process_cqe:
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
-		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+		if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+			qidx = 0;
+		txq = netdev_get_tx_queue(ndev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
if (netif_tx_queue_stopped(txq) &&
-		    netif_carrier_ok(pfvf->netdev))
+		    netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
return 0;
@@ -1142,13 +1154,13 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
}
}
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+			struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
-	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
-	struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
+	struct otx2_nic *pfvf = dev;
/* Check if there is enough room between producer
* and consumer index.


@@ -167,7 +167,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
}
int otx2_napi_handler(struct napi_struct *napi, int budget);
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+			struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);


@@ -395,7 +395,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
-	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+	if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, incase SQBs got freed up */


@@ -28,6 +28,51 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
+static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rep_dev *rep = netdev_priv(dev);
+	struct otx2_nic *pf = rep->mdev;
+	struct otx2_snd_queue *sq;
+	struct netdev_queue *txq;
+
+	sq = &pf->qset.sq[rep->rep_id];
+	txq = netdev_get_tx_queue(dev, 0);
+
+	if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
+		netif_tx_stop_queue(txq);
+
+		/* Check again, in case SQBs got freed up */
+		smp_mb();
+		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+		    > sq->sqe_thresh)
+			netif_tx_wake_queue(txq);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static int rvu_rep_open(struct net_device *dev)
+{
+	netif_carrier_on(dev);
+	netif_tx_start_all_queues(dev);
+	return 0;
+}
+
+static int rvu_rep_stop(struct net_device *dev)
+{
+	netif_carrier_off(dev);
+	netif_tx_disable(dev);
+	return 0;
+}
+
+static const struct net_device_ops rvu_rep_netdev_ops = {
+	.ndo_open	= rvu_rep_open,
+	.ndo_stop	= rvu_rep_stop,
+	.ndo_start_xmit	= rvu_rep_xmit,
+};
static int rvu_rep_napi_init(struct otx2_nic *priv,
struct netlink_ext_ack *extack)
{
@@ -208,6 +253,7 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
ndev->min_mtu = OTX2_MIN_MTU;
ndev->max_mtu = priv->hw.max_mtu;
+	ndev->netdev_ops = &rvu_rep_netdev_ops;
pcifunc = priv->rep_pf_map[rep_id];
rep->pcifunc = pcifunc;