net/mlx5e: move XDP_REDIRECT sq to dynamic allocation

Dynamically allocate the xdpsq used for egress-side XDP_REDIRECT.

mlx5 has multiple XDP SQs under struct mlx5e_channel:
1. rq_xdpsq: used for XDP_TX; an XDP program handles the rx packet and
transmits it on the same queue it was received on.
2. xdpsq: used for egress-side XDP_REDIRECT; another interface redirects
packets to this mlx5 interface via ndo_xdp_xmit.
3. xsksq: used by XSK; XSK has its own dedicated channel, which also
carries the resources of 1 and 2.

This patch changes only 2, the xdpsq (see the sketch below).
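
A minimal sketch of the shape of the change, based only on the hunks below (simplified, not the driver code verbatim): the per-channel xdpsq turns from an embedded struct into a pointer allocated by a new open helper, so the control path handles an ERR_PTR-encoded failure and the datapath checks the pointer for NULL.

/* Sketch: struct mlx5e_channel trimmed to the one member this patch touches. */
struct mlx5e_channel {
	/* XDP_REDIRECT */
	struct mlx5e_xdpsq *xdpsq;	/* was: struct mlx5e_xdpsq xdpsq; */
};

/* Control path (channel open): the new helper returns ERR_PTR() on failure. */
	c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
	if (IS_ERR(c->xdpsq)) {
		err = PTR_ERR(c->xdpsq);
		goto err_close_tx_cqs;
	}

/* Datapath (NAPI poll): every user of c->xdpsq now tolerates NULL. */
	if (c->xdpsq)
		busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq->cq);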

Signed-off-by: William Tu <witu@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241031125856.530927-5-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
5 changed files with 56 additions and 23 deletions

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -755,7 +755,7 @@ struct mlx5e_channel {
 	u8 lag_port;
 	/* XDP_REDIRECT */
-	struct mlx5e_xdpsq xdpsq;
+	struct mlx5e_xdpsq *xdpsq;
 	/* AF_XDP zero-copy */
 	struct mlx5e_rq xskrq;

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

@@ -865,7 +865,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	if (unlikely(sq_num >= priv->channels.num))
 		return -ENXIO;
-	sq = &priv->channels.c[sq_num]->xdpsq;
+	sq = priv->channels.c[sq_num]->xdpsq;
 	for (i = 0; i < n; i++) {
 		struct mlx5e_xmit_data_frags xdptxdf = {};

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -2086,6 +2086,44 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
 	mlx5e_free_xdpsq(sq);
 }
+static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
+						     struct mlx5e_params *params,
+						     struct mlx5e_channel_param *cparam,
+						     struct mlx5e_create_cq_param *ccp)
+{
+	struct mlx5e_xdpsq *xdpsq;
+	int err;
+	xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
+	if (!xdpsq)
+		return ERR_PTR(-ENOMEM);
+	err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation,
+			    &cparam->xdp_sq.cqp, ccp, &xdpsq->cq);
+	if (err)
+		goto err_free_xdpsq;
+	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, xdpsq, true);
+	if (err)
+		goto err_close_xdpsq_cq;
+	return xdpsq;
+err_close_xdpsq_cq:
+	mlx5e_close_cq(&xdpsq->cq);
+err_free_xdpsq:
+	kvfree(xdpsq);
+	return ERR_PTR(err);
+}
+static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
+{
+	mlx5e_close_xdpsq(xdpsq);
+	mlx5e_close_cq(&xdpsq->cq);
+	kvfree(xdpsq);
+}
 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 				 struct net_device *netdev,
 				 struct workqueue_struct *workqueue,
@@ -2496,15 +2534,16 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	if (err)
 		goto err_close_icosq_cq;
-	err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
-			    &c->xdpsq.cq);
-	if (err)
+	c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+	if (IS_ERR(c->xdpsq)) {
+		err = PTR_ERR(c->xdpsq);
 		goto err_close_tx_cqs;
+	}
 	err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
 			    &c->rq.cq);
 	if (err)
-		goto err_close_xdp_tx_cqs;
+		goto err_close_xdpredirect_sq;
 	err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
 				     &ccp, &c->rq_xdpsq.cq) : 0;
@@ -2516,7 +2555,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
 			       mlx5e_async_icosq_err_cqe_work);
 	if (err)
-		goto err_close_xdpsq_cq;
+		goto err_close_rq_xdpsq_cq;
 	mutex_init(&c->icosq_recovery_lock);
@@ -2540,16 +2579,8 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 			goto err_close_rq;
 	}
-	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
-	if (err)
-		goto err_close_xdp_sq;
 	return 0;
-err_close_xdp_sq:
-	if (c->xdp)
-		mlx5e_close_xdpsq(&c->rq_xdpsq);
 err_close_rq:
 	mlx5e_close_rq(&c->rq);
@@ -2562,15 +2593,15 @@ err_close_icosq:
 err_close_async_icosq:
 	mlx5e_close_icosq(&c->async_icosq);
-err_close_xdpsq_cq:
+err_close_rq_xdpsq_cq:
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
 err_close_rx_cq:
 	mlx5e_close_cq(&c->rq.cq);
-err_close_xdp_tx_cqs:
-	mlx5e_close_cq(&c->xdpsq.cq);
+err_close_xdpredirect_sq:
+	mlx5e_close_xdpredirect_sq(c->xdpsq);
 err_close_tx_cqs:
 	mlx5e_close_tx_cqs(c);
@@ -2586,7 +2617,6 @@ err_close_async_icosq_cq:
 static void mlx5e_close_queues(struct mlx5e_channel *c)
 {
-	mlx5e_close_xdpsq(&c->xdpsq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
 	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
@@ -2599,7 +2629,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
 	mlx5e_close_cq(&c->rq.cq);
-	mlx5e_close_cq(&c->xdpsq.cq);
+	mlx5e_close_xdpredirect_sq(c->xdpsq);
 	mlx5e_close_tx_cqs(c);
 	mlx5e_close_cq(&c->icosq.cq);
 	mlx5e_close_cq(&c->async_icosq.cq);

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -600,7 +600,8 @@ mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 			if (c->xdp)
 				sqs[num_sqs++] = c->rq_xdpsq.sqn;
-			sqs[num_sqs++] = c->xdpsq.sqn;
+			if (c->xdpsq)
+				sqs[num_sqs++] = c->xdpsq->sqn;
 		}
 	}
 	if (ptp_sq) {

drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

@@ -165,7 +165,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	if (unlikely(!budget))
 		goto out;
-	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+	if (c->xdpsq)
+		busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq->cq);
 	if (c->xdp)
 		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
@@ -236,7 +237,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	mlx5e_cq_arm(&rq->cq);
 	mlx5e_cq_arm(&c->icosq.cq);
 	mlx5e_cq_arm(&c->async_icosq.cq);
-	mlx5e_cq_arm(&c->xdpsq.cq);
+	if (c->xdpsq)
+		mlx5e_cq_arm(&c->xdpsq->cq);
 	if (xsk_open) {
 		mlx5e_handle_rx_dim(xskrq);