Merge tag 'mlx5-updates-2023-06-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2023-06-16

1) Added a new event handler to firmware sync reset, used to support the
   firmware sync reset flow on smart NICs. Adding this new stage to the
   flow enables the firmware to ensure host PFs unload before ECPFs
   unload, avoiding a race during PF recovery. (A condensed sketch of
   this stage follows the commit metadata below.)

2) Added debugfs entries for mlx5 eswitch bridge offloads

3) Added two new counters for vport stats

4) Minor fixups and cleanups for the net-next branch

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9a94d764e9 by David S. Miller, 2023-06-18 18:52:58 +01:00
33 changed files with 435 additions and 167 deletions

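For orientation before the per-file hunks: a condensed sketch of the new sync reset unload stage, paraphrased from the fw_reset.c changes further down. Locking and error paths are elided; this is not a verbatim excerpt of the handler.

static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
	/* 1. Fast-teardown the HCA and move the device to the error state. */
	/* 2. Unload this function's driver instance. Each function acks its
	 *    own unload, which lets firmware order host PFs ahead of ECPFs.
	 */
	/* 3. Ack by writing the reset-ack bit in the initialization segment
	 *    (mlx5_set_fw_rst_ack()).
	 */
	/* 4. Poll rst_state until firmware requests a PCI link toggle
	 *    (MLX5_FW_RST_STATE_TOGGLE_REQ) or returns to idle, then
	 *    complete the reload.
	 */
}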
@ -797,6 +797,16 @@ Counters on the NIC port that is connected to an eSwitch.
RoCE/UD/RC traffic) [#accel]_.
- Acceleration
* - `vport_loopback_packets`
- Unicast, multicast and broadcast packets that were looped back (received
and transmitted), IB/Eth [#accel]_.
- Acceleration
* - `vport_loopback_bytes`
- Unicast, multicast and broadcast bytes that were looped back (received
and transmitted), IB/Eth [#accel]_.
- Acceleration
* - `rx_steer_missed_packets`
- Number of packets that were received by the NIC but discarded because
they did not match any flow in the NIC flow table.
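
Once wired up by the en_stats.c changes later in this commit, the loopback pair appears in the regular ethtool statistics dump next to the other vport counters, but only when the device reports the vport_counter_local_loopback capability. Sample output lines (values are placeholders):

    vport_loopback_packets: 0
    vport_loopback_bytes: 0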

@ -75,7 +75,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o en/rep/bridge.o
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
en/rep/bridge.o
mlx5_core-$(CONFIG_THERMAL) += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o

@ -165,15 +165,6 @@ struct page_pool;
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
do { \
if (NETIF_MSG_##mlevel & (priv)->msglevel) \
netdev_warn(priv->netdev, format, \
##__VA_ARGS__); \
} while (0)
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@ -880,7 +871,6 @@ struct mlx5e_priv {
#endif
/* priv data path fields - end */
u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;

@ -65,12 +65,13 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
total_used += port_buffer->buffer[i].size;
mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
port_buffer->buffer[i].size,
port_buffer->buffer[i].xon,
port_buffer->buffer[i].xoff,
port_buffer->buffer[i].epsb,
port_buffer->buffer[i].lossy);
netdev_dbg(priv->netdev, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n",
i,
port_buffer->buffer[i].size,
port_buffer->buffer[i].xon,
port_buffer->buffer[i].xoff,
port_buffer->buffer[i].epsb,
port_buffer->buffer[i].lossy);
}
port_buffer->internal_buffers_size = 0;
@ -87,11 +88,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->internal_buffers_size -
port_buffer->headroom_size;
mlx5e_dbg(HW, priv,
"total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
port_buffer->port_buffer_size, port_buffer->headroom_size,
port_buffer->internal_buffers_size,
port_buffer->spare_buffer_size);
netdev_dbg(priv->netdev,
"total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
port_buffer->port_buffer_size, port_buffer->headroom_size,
port_buffer->internal_buffers_size,
port_buffer->spare_buffer_size);
out:
kfree(out);
return err;
@ -352,7 +353,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
netdev_dbg(priv->netdev, "%s: xoff=%d\n", __func__, xoff);
return xoff;
}
@ -484,6 +485,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u8 *prio2buffer)
{
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct net_device *netdev = priv->netdev;
struct mlx5e_port_buffer port_buffer;
u32 xoff = calculate_xoff(priv, mtu);
bool update_prio2buffer = false;
@ -495,7 +497,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
netdev_dbg(netdev, "%s: change=%x\n", __func__, change);
max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
@ -510,8 +512,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_PFC) {
mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
__func__, pfc->pfc_en);
netdev_dbg(netdev, "%s: requested PFC per priority bitmask: 0x%x\n",
__func__, pfc->pfc_en);
err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
if (err)
return err;
@ -526,8 +528,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
__func__, i, prio2buffer[i]);
netdev_dbg(priv->netdev, "%s: requested to map prio[%d] to buffer %d\n",
__func__, i, prio2buffer[i]);
err = fill_pfc_en(priv->mdev, &curr_pfc_en);
if (err)
@ -541,10 +543,10 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_SIZE) {
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
netdev_dbg(priv->netdev, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
__func__, i);
netdev_dbg(priv->netdev, "%s: lossless buffer[%d] size cannot be zero\n",
__func__, i);
return -EINVAL;
}
@ -552,7 +554,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
total_used += buffer_size[i];
}
mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
netdev_dbg(priv->netdev, "%s: total buffer requested=%d\n", __func__, total_used);
if (total_used > port_buffer.headroom_size &&
(total_used - port_buffer.headroom_size) >

@ -136,7 +136,6 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
struct mlx5_eswitch *esw = br_offloads->esw;
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
int ifindex = upper->ifindex;
int err = 0;
if (!netif_is_bridge_master(upper))
@ -150,15 +149,15 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
if (mlx5_esw_bridge_is_local(dev, rep, esw))
err = info->linking ?
mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack) :
mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack);
else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
err = info->linking ?
mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack) :
mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack);
return err;

@ -570,10 +570,10 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
mlx5e_dbg(HW, priv,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
tuple->ip_proto, err);
netdev_dbg(priv->netdev,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
tuple->ip_proto, err);
}
out:

@ -275,10 +275,10 @@ static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
__func__, i, ets->prio_tc[i]);
mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
__func__, i, tc_tx_bw[i], tc_group[i]);
netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
__func__, i, ets->prio_tc[i]);
netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
__func__, i, tc_tx_bw[i], tc_group[i]);
}
return err;
@ -399,9 +399,9 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
}
if (!ret) {
mlx5e_dbg(HW, priv,
"%s: PFC per priority bit mask: 0x%x\n",
__func__, pfc->pfc_en);
netdev_dbg(dev,
"%s: PFC per priority bit mask: 0x%x\n",
__func__, pfc->pfc_en);
}
return ret;
}
@ -611,8 +611,8 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
__func__, i, max_bw_value[i]);
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
__func__, i, max_bw_value[i]);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
@ -640,10 +640,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
mlx5e_dbg(HW, priv,
"%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
__func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
ets.prio_tc[i]);
netdev_dbg(netdev,
"%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
__func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);

@ -1689,16 +1689,6 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
return 0;
}
static u32 mlx5e_get_msglevel(struct net_device *dev)
{
return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
}
static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
{
((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
}
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@ -1952,9 +1942,9 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (err)
return err;
mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
MLX5E_GET_PFLAG(&priv->channels.params,
MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
netdev_dbg(priv->netdev, "MLX5E: RxCqeCmprss was turned %s\n",
MLX5E_GET_PFLAG(&priv->channels.params,
MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
return 0;
}
@ -2444,8 +2434,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
.get_fec_stats = mlx5e_get_fec_stats,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,

@ -2401,7 +2401,7 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
/* Asymmetric dynamic memory allocation.
* Freed in mlx5e_priv_arrays_free, not on channel closure.
*/
mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
netdev_dbg(priv->netdev, "Creating channel stats %d\n", ix);
priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
GFP_KERNEL, cpu_to_node(cpu));
if (!priv->channel_stats[ix])
@ -2779,7 +2779,7 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
num_txqs += ntc;
mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
netdev_dbg(priv->netdev, "Setting num_txqs %d\n", num_txqs);
err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
if (err)
netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
@ -5585,7 +5585,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
priv->max_nch = nch;
priv->max_opened_tc = 1;

@ -30,7 +30,7 @@
* SOFTWARE.
*/
#include "lib/mlx5.h"
#include "lib/events.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
@ -748,11 +748,22 @@ static const struct counter_desc vport_stats_desc[] = {
VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
static const struct counter_desc vport_loopback_stats_desc[] = {
{ "vport_loopback_packets",
VPORT_COUNTER_OFF(local_loopback.packets) },
{ "vport_loopback_bytes",
VPORT_COUNTER_OFF(local_loopback.octets) },
};
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
ARRAY_SIZE(vport_loopback_stats_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
return NUM_VPORT_COUNTERS;
return NUM_VPORT_COUNTERS +
NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
@ -761,6 +772,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_loopback_stats_desc[i].format);
return idx;
}
@ -771,6 +787,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
vport_stats_desc, i);
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
vport_loopback_stats_desc, i);
return idx;
}
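
A note on the hunk above: the NUM_STATS, FILL_STRS and FILL_STATS callbacks of a stats group must agree on the entry count, so all three derive the loopback contribution from the same capability-gated NUM_VPORT_LOOPBACK_COUNTERS() macro instead of a compile-time constant.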

@ -834,7 +834,7 @@ mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
return handle;
}
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(struct net_device *br_netdev,
struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge *bridge;
@ -858,11 +858,12 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
goto err_mdb_ht;
INIT_LIST_HEAD(&bridge->fdb_list);
bridge->ifindex = ifindex;
bridge->ifindex = br_netdev->ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
mlx5_esw_bridge_debugfs_init(br_netdev, bridge);
return bridge;
@ -886,6 +887,7 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
if (--bridge->refcnt)
return;
mlx5_esw_bridge_debugfs_cleanup(bridge);
mlx5_esw_bridge_egress_table_cleanup(bridge);
mlx5_esw_bridge_mcast_disable(bridge);
list_del(&bridge->list);
@ -898,14 +900,14 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
}
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
mlx5_esw_bridge_lookup(struct net_device *br_netdev, struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge *bridge;
ASSERT_RTNL();
list_for_each_entry(bridge, &br_offloads->bridges, list) {
if (bridge->ifindex == ifindex) {
if (bridge->ifindex == br_netdev->ifindex) {
mlx5_esw_bridge_get(bridge);
return bridge;
}
@ -918,7 +920,7 @@ mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads
return ERR_PTR(err);
}
bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
bridge = mlx5_esw_bridge_create(br_netdev, br_offloads);
if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
return bridge;
@ -1601,15 +1603,15 @@ static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_off
return 0;
}
static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
u16 flags,
static int mlx5_esw_bridge_vport_link_with_flags(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_bridge *bridge;
int err;
bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
bridge = mlx5_esw_bridge_lookup(br_netdev, br_offloads);
if (IS_ERR(bridge)) {
NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
return PTR_ERR(bridge);
@ -1627,15 +1629,16 @@ static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16
return err;
}
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id, 0,
br_offloads, extack);
}
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
@ -1647,7 +1650,7 @@ int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_
NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
return -EINVAL;
}
if (port->bridge->ifindex != ifindex) {
if (port->bridge->ifindex != br_netdev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
return -EINVAL;
}
@ -1658,23 +1661,25 @@ int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_
return err;
}
int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
return 0;
return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id,
MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
br_offloads, extack);
}
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
return mlx5_esw_bridge_vport_unlink(br_netdev, vport_num, esw_owner_vhca_id, br_offloads,
extack);
}
@ -1901,6 +1906,7 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
xa_init(&br_offloads->ports);
br_offloads->esw = esw;
esw->br_offloads = br_offloads;
mlx5_esw_bridge_debugfs_offloads_init(br_offloads);
return br_offloads;
}
@ -1916,6 +1922,7 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
mlx5_esw_bridge_flush(br_offloads);
WARN_ON(!xa_empty(&br_offloads->ports));
mlx5_esw_bridge_debugfs_offloads_cleanup(br_offloads);
esw->br_offloads = NULL;
kvfree(br_offloads);

@ -10,6 +10,7 @@
#include <linux/xarray.h>
#include "eswitch.h"
struct dentry;
struct mlx5_flow_table;
struct mlx5_flow_group;
@ -17,6 +18,7 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_eswitch *esw;
struct list_head bridges;
struct xarray ports;
struct dentry *debugfs_root;
struct notifier_block netdev_nb;
struct notifier_block nb_blk;
@ -43,16 +45,18 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,

@ -0,0 +1,89 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <linux/debugfs.h>
#include "bridge.h"
#include "bridge_priv.h"
static void *mlx5_esw_bridge_debugfs_start(struct seq_file *seq, loff_t *pos);
static void *mlx5_esw_bridge_debugfs_next(struct seq_file *seq, void *v, loff_t *pos);
static void mlx5_esw_bridge_debugfs_stop(struct seq_file *seq, void *v);
static int mlx5_esw_bridge_debugfs_show(struct seq_file *seq, void *v);
static const struct seq_operations mlx5_esw_bridge_debugfs_sops = {
.start = mlx5_esw_bridge_debugfs_start,
.next = mlx5_esw_bridge_debugfs_next,
.stop = mlx5_esw_bridge_debugfs_stop,
.show = mlx5_esw_bridge_debugfs_show,
};
DEFINE_SEQ_ATTRIBUTE(mlx5_esw_bridge_debugfs);
static void *mlx5_esw_bridge_debugfs_start(struct seq_file *seq, loff_t *pos)
{
struct mlx5_esw_bridge *bridge = seq->private;
rtnl_lock();
return *pos ? seq_list_start(&bridge->fdb_list, *pos - 1) : SEQ_START_TOKEN;
}
static void *mlx5_esw_bridge_debugfs_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct mlx5_esw_bridge *bridge = seq->private;
return seq_list_next(v == SEQ_START_TOKEN ? &bridge->fdb_list : v, &bridge->fdb_list, pos);
}
static void mlx5_esw_bridge_debugfs_stop(struct seq_file *seq, void *v)
{
rtnl_unlock();
}
static int mlx5_esw_bridge_debugfs_show(struct seq_file *seq, void *v)
{
struct mlx5_esw_bridge_fdb_entry *entry;
u64 packets, bytes, lastuse;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "%-16s %-17s %4s %20s %20s %20s %5s\n",
"DEV", "MAC", "VLAN", "PACKETS", "BYTES", "LASTUSE", "FLAGS");
return 0;
}
entry = list_entry(v, struct mlx5_esw_bridge_fdb_entry, list);
mlx5_fc_query_cached_raw(entry->ingress_counter, &bytes, &packets, &lastuse);
seq_printf(seq, "%-16s %-17pM %4d %20llu %20llu %20llu %#5x\n",
entry->dev->name, entry->key.addr, entry->key.vid, packets, bytes, lastuse,
entry->flags);
return 0;
}
void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_bridge *bridge)
{
if (!bridge->br_offloads->debugfs_root)
return;
bridge->debugfs_dir = debugfs_create_dir(br_netdev->name,
bridge->br_offloads->debugfs_root);
debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge,
&mlx5_esw_bridge_debugfs_fops);
}
void mlx5_esw_bridge_debugfs_cleanup(struct mlx5_esw_bridge *bridge)
{
debugfs_remove_recursive(bridge->debugfs_dir);
bridge->debugfs_dir = NULL;
}
void mlx5_esw_bridge_debugfs_offloads_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
if (!br_offloads->esw->debugfs_root)
return;
br_offloads->debugfs_root = debugfs_create_dir("bridge", br_offloads->esw->debugfs_root);
}
void mlx5_esw_bridge_debugfs_offloads_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
debugfs_remove_recursive(br_offloads->debugfs_root);
br_offloads->debugfs_root = NULL;
}
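
Combined with the eswitch.c hunk below, which creates the top-level "esw" directory, the resulting debugfs layout is as follows, assuming the standard mlx5 debugfs root (the PCI address and bridge name are placeholders):

    /sys/kernel/debug/mlx5/<pci_addr>/esw/bridge/<bridge_netdev>/fdb

Reading the fdb file walks the bridge's FDB list under RTNL and prints one row per offloaded entry in the header/row format of mlx5_esw_bridge_debugfs_show() above.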

@ -199,6 +199,7 @@ struct mlx5_esw_bridge {
int refcnt;
struct list_head list;
struct mlx5_esw_bridge_offloads *br_offloads;
struct dentry *debugfs_dir;
struct list_head fdb_list;
struct rhashtable fdb_ht;
@ -241,4 +242,9 @@ void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
struct mlx5_esw_bridge_vlan *vlan);
void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge);
void mlx5_esw_bridge_debugfs_offloads_init(struct mlx5_esw_bridge_offloads *br_offloads);
void mlx5_esw_bridge_debugfs_offloads_cleanup(struct mlx5_esw_bridge_offloads *br_offloads);
void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_bridge *bridge);
void mlx5_esw_bridge_debugfs_cleanup(struct mlx5_esw_bridge *bridge);
#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */

@ -285,9 +285,8 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
} else {
esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
}
esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
/* Star rule to forward all traffic to uplink vport */
memset(&dest, 0, sizeof(dest));
@ -299,9 +298,8 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
} else {
esw->fdb_table.legacy.vepa_star_rule = flow_rule;
}
esw->fdb_table.legacy.vepa_star_rule = flow_rule;
out:
kvfree(spec);

@ -31,6 +31,7 @@
*/
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
@ -1765,6 +1766,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
esw->debugfs_root = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(dev));
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@ -1818,6 +1820,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
debugfs_remove_recursive(esw->debugfs_root);
kfree(esw);
unregister_param:
devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
@ -1844,6 +1847,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
kfree(esw);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
ARRAY_SIZE(mlx5_eswitch_params));

@ -304,6 +304,8 @@ enum {
MLX5_ESW_FDB_CREATED = BIT(0),
};
struct dentry;
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@ -312,6 +314,7 @@ struct mlx5_eswitch {
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
struct dentry *debugfs_root;
struct workqueue_struct *work_queue;
struct xarray vports;
u32 flags;
@ -665,6 +668,7 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
index, \
vport, \
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base), \
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);

@ -5,7 +5,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/events.h"
struct mlx5_event_nb {
struct mlx5_nb nb;

@ -21,6 +21,7 @@ struct mlx5_fw_reset {
struct workqueue_struct *wq;
struct work_struct fw_live_patch_work;
struct work_struct reset_request_work;
struct work_struct reset_unload_work;
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
@ -30,6 +31,26 @@ struct mlx5_fw_reset {
int ret;
};
enum {
MLX5_FW_RST_STATE_IDLE = 0,
MLX5_FW_RST_STATE_TOGGLE_REQ = 4,
};
enum {
MLX5_RST_STATE_BIT_NUM = 12,
MLX5_RST_ACK_BIT_NUM = 22,
};
static u8 mlx5_get_fw_rst_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->initializing) >> MLX5_RST_STATE_BIT_NUM) & 0xF;
}
static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
{
iowrite32be(BIT(MLX5_RST_ACK_BIT_NUM), &dev->iseg->initializing);
}
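
In these two helpers, rst_state is the 4-bit field at bits 12-15 of the initialization segment's initializing word (hence the shift by MLX5_RST_STATE_BIT_NUM and the 0xF mask), and the unload ack is delivered by setting bit 22 of the same word.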
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@ -155,7 +176,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
@ -163,7 +184,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
mlx5_unload_one(dev, false);
if (!unloaded)
mlx5_unload_one(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@ -204,7 +226,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
mlx5_fw_reset_complete_reload(dev);
mlx5_fw_reset_complete_reload(dev, false);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@ -276,6 +298,44 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *sdev;
u16 sdev_id;
int err;
/* Check that all functions under the pci bridge are PFs of
* this device otherwise fail this function.
*/
list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
if (err)
return err;
if (sdev_id != dev_id) {
mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
return -EPERM;
}
}
return 0;
}
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
{
u16 dev_id;
int err;
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
}
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return false;
return (!mlx5_check_dev_ids(dev, dev_id));
}
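
The device-ID walk above appears to exist because the recovery path may toggle the PCI link of the bridge above the device (see mlx5_pci_link_toggle() below), which disturbs every function on that bus; reset-now is therefore only treated as possible when every function under the bridge is a PF of this same device.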
static void mlx5_sync_reset_request_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@ -283,7 +343,8 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags)) {
if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
!mlx5_is_reset_now_capable(dev)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@ -303,26 +364,18 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
u16 reg16, dev_id, sdev_id;
unsigned long timeout;
struct pci_dev *sdev;
u16 reg16, dev_id;
int cap, err;
u32 reg32;
/* Check that all functions under the pci bridge are PFs of
* this device otherwise fail this function.
*/
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return err;
list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
if (err)
return err;
if (sdev_id != dev_id)
return -EPERM;
}
err = mlx5_check_dev_ids(dev, dev_id);
if (err)
return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@ -427,7 +480,70 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
mlx5_fw_reset_complete_reload(dev, false);
}
static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
unsigned long timeout;
bool reset_action;
u8 rst_state;
int err;
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
dev = fw_reset->dev;
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
err = mlx5_cmd_fast_teardown_hca(dev);
if (err)
mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err);
else
mlx5_enter_error_state(dev, true);
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
mlx5_unload_one_devl_locked(dev, false);
else
mlx5_unload_one(dev, false);
mlx5_set_fw_rst_ack(dev);
mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
reset_action = false;
timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
do {
rst_state = mlx5_get_fw_rst_state(dev);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
rst_state == MLX5_FW_RST_STATE_IDLE) {
reset_action = true;
break;
}
msleep(20);
} while (!time_after(jiffies, timeout));
if (!reset_action) {
mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
rst_state);
fw_reset->ret = -ETIMEDOUT;
goto done;
}
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
err = mlx5_pci_link_toggle(dev);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
fw_reset->ret = err;
}
}
done:
mlx5_fw_reset_complete_reload(dev, true);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
@ -452,6 +568,9 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
case MLX5_SYNC_RST_STATE_RESET_REQUEST:
queue_work(fw_reset->wq, &fw_reset->reset_request_work);
break;
case MLX5_SYNC_RST_STATE_RESET_UNLOAD:
queue_work(fw_reset->wq, &fw_reset->reset_unload_work);
break;
case MLX5_SYNC_RST_STATE_RESET_NOW:
queue_work(fw_reset->wq, &fw_reset->reset_now_work);
break;
@ -486,10 +605,13 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
unsigned long timeout;
int err;
if (MLX5_CAP_GEN(dev, pci_sync_for_fw_update_with_driver_unload))
pci_sync_update_timeout += mlx5_tout_ms(dev, RESET_UNLOAD);
timeout = msecs_to_jiffies(pci_sync_update_timeout);
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
pci_sync_update_timeout / 1000);
@ -526,6 +648,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
cancel_work_sync(&fw_reset->reset_unload_work);
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
@ -564,6 +687,7 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
INIT_WORK(&fw_reset->reset_unload_work, mlx5_sync_reset_unload_event);
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);

@ -39,6 +39,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/events.h"
#include "lib/pci_vsc.h"
#include "lib/tout.h"
#include "diag/fw_tracer.h"

@ -7,7 +7,7 @@
#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"
#include "lib/events.h"
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{

@ -6,7 +6,7 @@
#include "lag/lag.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lib/mlx5.h"
#include "lib/events.h"
static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __LIB_EVENTS_H__
#define __LIB_EVENTS_H__
#include "mlx5_core.h"
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
enum port_module_event_status_type {
MLX5_MODULE_STATUS_PLUGGED = 0x1,
MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
MLX5_MODULE_STATUS_ERROR = 0x3,
MLX5_MODULE_STATUS_DISABLED = 0x4,
MLX5_MODULE_STATUS_NUM,
};
enum port_module_event_error_type {
MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_pme_stats {
u64 status_counters[MLX5_MODULE_STATUS_NUM];
u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
#endif

@ -45,40 +45,6 @@ int mlx5_crdump_enable(struct mlx5_core_dev *dev);
void mlx5_crdump_disable(struct mlx5_core_dev *dev);
int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
/* TODO move to lib/events.h */
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
enum port_module_event_status_type {
MLX5_MODULE_STATUS_PLUGGED = 0x1,
MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
MLX5_MODULE_STATUS_ERROR = 0x3,
MLX5_MODULE_STATUS_DISABLED = 0x4,
MLX5_MODULE_STATUS_NUM,
};
enum port_module_event_error_type {
MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_pme_stats {
u64 status_counters[MLX5_MODULE_STATUS_NUM];
u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));

@ -24,7 +24,8 @@ static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
[MLX5_TO_TEARDOWN_MS] = 3000,
[MLX5_TO_FSM_REACTIVATE_MS] = 5000,
[MLX5_TO_RECLAIM_PAGES_MS] = 5000,
[MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
[MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000,
[MLX5_TO_RESET_UNLOAD_MS] = 300000
};
static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
@ -118,7 +119,8 @@ u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
({ \
u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
tout_set(dev, fw_to + (to_extra), to_type); \
if (fw_to) \
tout_set(dev, fw_to + (to_extra), to_type); \
fw_to; \
})
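
With the new if (fw_to) guard, a firmware that leaves a timeout field zeroed, for example one that predates the reset_unload_to field queried just below, no longer overwrites the software default from tout_def_sw_val[] with zero. The macro still evaluates to fw_to, so callers can tell whether firmware supplied a value.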
@ -146,6 +148,7 @@ static int tout_query_dtor(struct mlx5_core_dev *dev)
MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
MLX5_TIMEOUT_FILL(reset_unload_to, out, dev, MLX5_TO_RESET_UNLOAD_MS, 0);
return 0;
}

@ -26,6 +26,7 @@ enum mlx5_timeouts_types {
MLX5_TO_FSM_REACTIVATE_MS,
MLX5_TO_RECLAIM_PAGES_MS,
MLX5_TO_RECLAIM_VFS_PAGES_MS,
MLX5_TO_RESET_UNLOAD_MS,
MAX_TIMEOUT_TYPES
};

@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,

@ -358,4 +358,11 @@ static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16
return (vport_num >= base_vport && vport_num < max_vport);
}
static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
: vport;
}
#endif /* __MLX5_CORE_H__ */

@ -28,7 +28,6 @@ struct mlx5_sf_table {
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
u8 ecpu: 1;
};
static struct mlx5_sf *

@ -34,6 +34,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
int out_size;
void *out;
@ -46,7 +47,8 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
HCA_CAP_OPMOD_GET_CUR);

@ -1161,12 +1161,6 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
static int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
: vport;
}
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{

@ -716,6 +716,7 @@ enum sync_rst_state_type {
MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};
struct mlx5_eqe_sync_fw_update {

@ -1755,9 +1755,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
u8 reserved_at_330[0x7];
u8 reserved_at_330[0x6];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 reserved_at_338[0x1];
u8 vport_counter_local_loopback[0x1];
u8 q_counter_aggregation[0x1];
u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
@ -3117,7 +3118,9 @@ struct mlx5_ifc_dtor_reg_bits {
struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
u8 reserved_at_1c0[0x40];
struct mlx5_ifc_default_timeout_bits reset_unload_to;
u8 reserved_at_1c0[0x20];
};
enum {
@ -5187,7 +5190,9 @@ struct mlx5_ifc_query_vport_counter_out_bits {
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
u8 reserved_at_680[0xa00];
struct mlx5_ifc_traffic_counter_bits local_loopback;
u8 reserved_at_700[0x980];
};
enum {
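
A quick bookkeeping check on the three mlx5_ifc.h hunks above, since each reserved field must shrink by exactly the width of the field carved out of it for the structure layouts to stay intact:

    cmd_hca_cap:   reserved_at_330 shrinks 0x7 -> 0x6, freeing bit 0x336 for
                   pci_sync_for_fw_update_with_driver_unload; the old one-bit
                   reserved_at_338 becomes vport_counter_local_loopback.
    dtor_reg:      reset_unload_to (0x20 bits, like the other default
                   timeouts) takes half of the former 0x40 reserved block,
                   leaving 0x20 bits reserved.
    query_vport_counter_out: local_loopback (a 0x80-bit traffic counter)
                   takes the front of reserved_at_680[0xa00], leaving
                   reserved_at_700[0x980] (0x680 + 0x80 = 0x700).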