Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-03-05 (idpf, ice, i40e, igc, e1000e)

This series contains updates to idpf, ice, i40e, igc and e1000e drivers.

Emil disables local BH on NAPI schedule for proper handling of softirqs
on idpf.

Jake stops reporting of a virtchnl RSS option which is unsupported on
ice.

Rand Deeb adds a NULL check to prevent a possible NULL pointer
dereference on ice.

Michal Schmidt moves DPLL mutex initialization to resolve uninitialized
mutex usage for ice.

Jesse fixes incorrect variable usage for calculating Tx stats on ice.

Ivan Vecera corrects the logic of the firmware version equality check
on i40e.

Florian Kauer prevents memory corruption for XDP_REDIRECT on igc.

Sasha reverts an incorrect use of FIELD_GET which caused a regression
for Wake on LAN on e1000e.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2024-03-06 10:28:02 +00:00 as commit
f287d6aafd; 9 changed files with 15 additions and 22 deletions.


@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
 					   (u16)(mac_reg & 0xFFFF));
 		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
-					   FIELD_GET(E1000_RAH_AV, mac_reg));
+					   (u16)((mac_reg & E1000_RAH_AV) >> 16));
 	}

 	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
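
Why the revert restores Wake on LAN: a minimal sketch, not the driver's
code. It assumes E1000_RAH_AV is the bit-31 address-valid flag from the
e1000e headers, and that the BM_RAR_CTRL wakeup copy expects that flag
in bit 15 of the written half-word:

	#include <linux/bitfield.h>

	#define E1000_RAH_AV 0x80000000	/* receive address valid, bit 31 */

	u32 mac_reg = 0x80001234;	/* RAH value with the valid bit set */

	/* FIELD_GET() shifts the masked field all the way down to bit 0 ... */
	u16 a = FIELD_GET(E1000_RAH_AV, mac_reg);	/* a == 0x0001 */

	/* ... while the restored expression keeps the flag at bit 15 */
	u16 b = (u16)((mac_reg & E1000_RAH_AV) >> 16);	/* b == 0x8000 */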


@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
  **/
 static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
 {
-	return (hw->aq.fw_maj_ver > maj ||
-		(hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+	return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
 }

 #endif /* _I40E_PROTOTYPE_H_ */
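
The old expression also returned true whenever the running major
version was simply newer, so "equal" behaved like "greater, or exactly
equal". A hypothetical call sequence (field names as in the hunk above):

	struct i40e_hw hw = { .aq = { .fw_maj_ver = 5, .fw_min_ver = 0 } };

	i40e_is_fw_ver_eq(&hw, 4, 6);	/* old logic: true (5 > 4); fixed: false */
	i40e_is_fw_ver_eq(&hw, 5, 0);	/* old and fixed logic: true */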


@@ -2120,6 +2120,7 @@ void ice_dpll_init(struct ice_pf *pf)
 	struct ice_dplls *d = &pf->dplls;
 	int err = 0;

+	mutex_init(&d->lock);
 	err = ice_dpll_init_info(pf, cgu);
 	if (err)
 		goto err_exit;
@@ -2132,7 +2133,6 @@ void ice_dpll_init(struct ice_pf *pf)
 	err = ice_dpll_init_pins(pf, cgu);
 	if (err)
 		goto deinit_pps;
-	mutex_init(&d->lock);
 	if (cgu) {
 		err = ice_dpll_init_worker(pf);
 		if (err)
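
The point of the move: teardown paths reachable before the old
mutex_init() call already take d->lock, so the lock must exist before
the first step that can fail. A sketch of the ordering rule, where
first_init_step() is a hypothetical stand-in:

	mutex_init(&d->lock);		/* initialize before anything that can lock it */

	err = first_init_step(pf);
	if (err)
		goto err_exit;		/* the unwind path may acquire d->lock,
					 * which is undefined behavior if the
					 * mutex is still uninitialized */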


@@ -3192,7 +3192,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
 		}
 	}

-	tx_ring_stats = vsi_stat->rx_ring_stats;
+	tx_ring_stats = vsi_stat->tx_ring_stats;
 	vsi_stat->tx_ring_stats =
 		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
 			       sizeof(*vsi_stat->tx_ring_stats),
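
The snapshot exists so a failed reallocation can be rolled back;
snapshotting the rx array instead leaked the old tx array and rolled
back to the wrong pointer. A sketch of the save/restore idiom around
krealloc_array(), assuming the usual error handling after the hunk:

	old_stats = vsi_stat->tx_ring_stats;	/* must snapshot the *tx* array */
	vsi_stat->tx_ring_stats =
		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
			       sizeof(*vsi_stat->tx_ring_stats), GFP_KERNEL);
	if (!vsi_stat->tx_ring_stats) {
		/* krealloc_array() left the old buffer intact, restore it */
		vsi_stat->tx_ring_stats = old_stats;
		return -ENOMEM;
	}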


@@ -8013,6 +8013,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
 	pf_sw = pf->first_sw;
 	/* find the attribute in the netlink message */
 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;

 	nla_for_each_nested(attr, br_spec, rem) {
 		__u16 mode;
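
nlmsg_find_attr() returns NULL when the requested attribute is absent,
and nla_for_each_nested() reads its argument's length immediately, so a
request without IFLA_AF_SPEC dereferenced a NULL pointer on the first
iteration. The macro, as defined in include/net/netlink.h:

	#define nla_for_each_nested(pos, nla, rem) \
		nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)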


@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 		vf->driver_caps = *(u32 *)msg;
 	else
 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
-				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
 				  VIRTCHNL_VF_OFFLOAD_VLAN;

 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
 	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
 						    vf->driver_caps);

-	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
-	} else {
-		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
-		else
-			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
-	}

 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
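
The dropped branch advertised RSS mechanisms (over AQ messages or
direct register writes) that the ice PF does not actually implement. A
VF-side sketch of the negotiation after this change; the helper names
are hypothetical:

	/* only the PF-driven RSS path can now be granted */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)
		vf_setup_rss_via_pf();	/* configure RSS through virtchnl messages */
	else
		vf_run_without_rss();	/* RSS_AQ/RSS_REG are never offered */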


@@ -13,8 +13,6 @@
  *   - opcodes needed by VF when caps are activated
  *
  * Caps that don't use new opcodes (no opcodes should be allowed):
- *  - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- *  - VIRTCHNL_VF_OFFLOAD_RSS_REG
  *  - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
  *  - VIRTCHNL_VF_OFFLOAD_CRC
  *  - VIRTCHNL_VF_OFFLOAD_RX_POLLING


@@ -2087,8 +2087,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
 		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);

 	/* schedule the napi to receive all the marker packets */
+	local_bh_disable();
 	for (i = 0; i < vport->num_q_vectors; i++)
 		napi_schedule(&vport->q_vectors[i].napi);
+	local_bh_enable();

 	return idpf_wait_for_marker_event(vport);
 }
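
napi_schedule() only marks NET_RX_SOFTIRQ pending; called from process
context with bottom halves enabled, the softirq may not run until some
later interrupt, stalling the marker-packet wait below. Wrapping the
calls guarantees execution, because local_bh_enable() runs any pending
softirqs. The general pattern:

	local_bh_disable();			/* open a bottom-half-off section */
	napi_schedule(&q_vector->napi);		/* raise NET_RX_SOFTIRQ (pending) */
	local_bh_enable();			/* pending softirqs run right here */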


@@ -6487,7 +6487,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	int cpu = smp_processor_id();
 	struct netdev_queue *nq;
 	struct igc_ring *ring;
-	int i, drops;
+	int i, nxmit;

 	if (unlikely(!netif_carrier_ok(dev)))
 		return -ENETDOWN;
@@ -6503,16 +6503,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	/* Avoid transmit queue timeout since we share it with the slow path */
 	txq_trans_cond_update(nq);

-	drops = 0;
+	nxmit = 0;
 	for (i = 0; i < num_frames; i++) {
 		int err;
 		struct xdp_frame *xdpf = frames[i];

 		err = igc_xdp_init_tx_descriptor(ring, xdpf);
-		if (err) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err)
+			break;
+		nxmit++;
 	}

 	if (flags & XDP_XMIT_FLUSH)
@@ -6520,7 +6519,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,

 	__netif_tx_unlock(nq);

-	return num_frames - drops;
+	return nxmit;
 }

 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
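
Since the XDP_REDIRECT error path moved into the core, ndo_xdp_xmit()
must return how many frames it consumed and must not free the rest: the
caller frees everything past that count. The old code freed failed
frames itself with xdp_return_frame_rx_napi(), so the core then freed
them a second time, corrupting memory. A sketch of the caller-side
contract, modeled on the devmap flush path:

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames, flags);
	if (sent < 0)
		sent = 0;
	for (i = sent; i < cnt; i++)
		xdp_return_frame_rx_napi(frames[i]);	/* freed exactly once,
							 * by the caller */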