ionic: Mark error paths in the data path as unlikely

Mark the error paths in the data path with unlikely() so the compiler can
lay out these hot paths for the common, non-error case.
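
For context, unlikely() is the kernel's branch-prediction hint built on the
compiler's __builtin_expect(); the real macros live in
include/linux/compiler.h. Below is a minimal, self-contained userspace
sketch of the pattern this patch applies; the do_map() helper is
hypothetical and only stands in for a mapping call that rarely fails:

  #include <stdio.h>

  /* Simplified stand-ins for the kernel's hint macros; the real
   * definitions in include/linux/compiler.h may also add branch-profiling
   * instrumentation depending on the kernel config.
   */
  #define likely(x)       __builtin_expect(!!(x), 1)
  #define unlikely(x)     __builtin_expect(!!(x), 0)

  /* Hypothetical helper: a mapping operation that almost always succeeds. */
  static int do_map(int fd)
  {
          return fd < 0 ? -1 : 0;
  }

  int main(void)
  {
          int err = do_map(3);

          /* Same shape as the changes below: hint the error branch as cold
           * so the compiler lays out the success path as straight-line code.
           */
          if (unlikely(err)) {
                  fprintf(stderr, "map failed: %d\n", err);
                  return 1;
          }

          printf("hot path\n");
          return 0;
  }

With the hint, GCC and Clang typically move the error block out of the
fall-through path, which is the effect the annotations below aim for in the
driver's hot Rx/Tx code.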

Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Link: https://lore.kernel.org/r/20240529000259.25775-5-shannon.nelson@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -582,7 +582,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
                          buf_info->page_offset,
                          true);
                 __netif_tx_unlock(nq);
-                if (err) {
+                if (unlikely(err)) {
                         netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
                         goto out_xdp_abort;
                 }
@@ -597,7 +597,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
                                IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 
                 err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
-                if (err) {
+                if (unlikely(err)) {
                         netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
                         goto out_xdp_abort;
                 }
@@ -1058,7 +1058,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
         dma_addr_t dma_addr;
 
         dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
-        if (dma_mapping_error(dev, dma_addr)) {
+        if (unlikely(dma_mapping_error(dev, dma_addr))) {
                 net_warn_ratelimited("%s: DMA single map failed on %s!\n",
                                      dev_name(dev), q->name);
                 q_to_tx_stats(q)->dma_map_err++;
@@ -1075,7 +1075,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
         dma_addr_t dma_addr;
 
         dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
-        if (dma_mapping_error(dev, dma_addr)) {
+        if (unlikely(dma_mapping_error(dev, dma_addr))) {
                 net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
                                      dev_name(dev), q->name);
                 q_to_tx_stats(q)->dma_map_err++;
@@ -1316,7 +1316,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
         int err;
 
         err = skb_cow_head(skb, 0);
-        if (err)
+        if (unlikely(err))
                 return err;
 
         if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1340,7 +1340,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
         int err;
 
         err = skb_cow_head(skb, 0);
-        if (err)
+        if (unlikely(err))
                 return err;
 
         if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1444,7 +1444,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
                 err = ionic_tx_tcp_inner_pseudo_csum(skb);
         else
                 err = ionic_tx_tcp_pseudo_csum(skb);
-        if (err) {
+        if (unlikely(err)) {
                 /* clean up mapping from ionic_tx_map_skb */
                 ionic_tx_desc_unmap_bufs(q, desc_info);
                 return err;
@@ -1729,7 +1729,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 linearize:
         if (too_many_frags) {
                 err = skb_linearize(skb);
-                if (err)
+                if (unlikely(err))
                         return err;
                 q_to_tx_stats(q)->linearize++;
         }
@@ -1763,7 +1763,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
         else
                 err = ionic_tx(netdev, q, skb);
 
-        if (err)
+        if (unlikely(err))
                 goto err_out_drop;
 
         return NETDEV_TX_OK;
@@ -1809,7 +1809,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         else
                 err = ionic_tx(netdev, q, skb);
 
-        if (err)
+        if (unlikely(err))
                 goto err_out_drop;
 
         return NETDEV_TX_OK;