Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-10-08 (ice, iavf, igb, e1000e, e1000)

This series contains updates to ice, iavf, igb, e1000e, and e1000
drivers.

For ice:

Wojciech adds support for ethtool reset.

Paul adds support for hardware-based VF mailbox limits on E830 devices.

Jake converts ice_vc_cfg_qs_msg() to use a single, consistent queue
iterator and moves storage of max_frame and rx_buf_len from the VSI
struct to the Rx ring structure.

Hongbo Li uses assign_bit() to replace an open-coded instance.
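
For reference, assign_bit() from <linux/bitops.h> collapses a
conditional set_bit()/clear_bit() into a single call with the same
atomic semantics; a minimal before/after sketch mirroring the
ice_set_features() hunk below:

	/* before: open-coded conditional set/clear */
	ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
	      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);

	/* after: one helper expresses the same intent */
	assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);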

Markus Elfring adjusts a couple of PTP error paths to use a common
exit point.

Yue Haibing removes unused declarations.

For iavf:

Yue Haibing removes unused declarations.

For igb:

Yue Haibing removes unused declarations.

For e1000e:

Takamitsu Iwai removes unnecessary writel() calls.

Joe Damato adds netdev-genl support to query IRQ, NAPI, and queue
information.

For e1000:

Joe Damato adds netdev-genl support to query IRQ, NAPI, and queue
information.
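
In both drivers the linking itself is a small pattern; a minimal
sketch using the core netif_napi_set_irq() and netif_queue_set_napi()
helpers, as the hunks below apply them to each driver's single NAPI
instance:

	/* on open/up: tell the core which IRQ and queues this NAPI serves */
	netif_napi_set_irq(&adapter->napi, irq);
	napi_enable(&adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);

	/* on down/close: unlink the queues before disabling NAPI */
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
	napi_disable(&adapter->napi);

Userspace can then query the IRQ/NAPI/queue mapping through the netdev
netlink family (the napi-get and queue-get ops).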

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  e1000: Link NAPI instances to queues and IRQs
  e1000e: Link NAPI instances to queues and IRQs
  e1000e: Remove duplicated writel() in e1000_configure_tx/rx()
  igb: Cleanup unused declarations
  iavf: Remove unused declarations
  ice: Cleanup unused declarations
  ice: Use common error handling code in two functions
  ice: Make use of assign_bit() API
  ice: store max_frame and rx_buf_len only in ice_rx_ring
  ice: consistently use q_idx in ice_vc_cfg_qs_msg()
  ice: add E830 HW VF mailbox message limit support
  ice: Implement ethtool reset support
====================

Link: https://patch.msgid.link/20241008233441.928802-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 22ee378eb6
Committer: Jakub Kicinski <kuba@kernel.org>
CommitDate: 2024-10-09 20:04:43 -07:00
25 changed files with 269 additions and 100 deletions

@@ -101,6 +101,37 @@ example, if Rx packets are 10 and Netdev (software statistics) displays
rx_bytes as "X", then ethtool (hardware statistics) will display rx_bytes as
"X+40" (4 bytes CRC x 10 packets).
ethtool reset
-------------
The driver supports 3 types of resets:
- PF reset - resets only components associated with the given PF, does not
impact other PFs
- CORE reset - whole adapter is affected, resets all PFs
- GLOBAL reset - same as CORE, but MAC and PHY components are also reinitialized
These are mapped to ethtool reset flags as follows:
- PF reset:
# ethtool --reset <ethX> irq dma filter offload
- CORE reset:
# ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \
ram-shared
- GLOBAL reset:
# ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \
mac-shared phy-shared ram-shared
In switchdev mode you can reset a VF using its port representor:
# ethtool --reset <repr> irq dma filter offload
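
As a cross-reference, these flag sets decompose into the ETH_RESET_*
bits from <linux/ethtool.h>; the driver encodes them as the
ICE_ETHTOOL_* masks visible in the ice_ethtool.c hunk further below:

	#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
				 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
	#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
				   ETH_RESET_SHARED_SHIFT)
	#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
				   (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
				   (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
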
Viewing Link Messages
---------------------

@@ -513,6 +513,8 @@ void e1000_down(struct e1000_adapter *adapter)
*/
netif_carrier_off(netdev);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
@@ -1392,7 +1394,10 @@ int e1000_open(struct net_device *netdev)
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
napi_enable(&adapter->napi);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);

@@ -2928,11 +2928,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
writel(0, tx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
else
writel(0, tx_ring->tail);
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
@@ -3253,11 +3250,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
writel(0, rx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
else
writel(0, rx_ring->tail);
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
@@ -4613,6 +4607,7 @@ int e1000e_open(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int err;
int irq;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
@@ -4676,7 +4671,15 @@ int e1000e_open(struct net_device *netdev)
/* From here on the code is the same as e1000e_up() */
clear_bit(__E1000_DOWN, &adapter->state);
if (adapter->int_mode == E1000E_INT_MODE_MSIX)
irq = adapter->msix_entries[0].vector;
else
irq = adapter->pdev->irq;
netif_napi_set_irq(&adapter->napi, irq);
napi_enable(&adapter->napi);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);
@@ -4735,6 +4738,8 @@ int e1000e_close(struct net_device *netdev)
netdev_info(netdev, "NIC Link is Down\n");
}
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000e_free_tx_resources(adapter->tx_ring);

@@ -529,22 +529,16 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
iavf_state_str(adapter->state));
}
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
void iavf_napi_add_all(struct iavf_adapter *adapter);
void iavf_napi_del_all(struct iavf_adapter *adapter);
int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
@@ -555,11 +549,9 @@ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
void iavf_enable_queues(struct iavf_adapter *adapter);
void iavf_disable_queues(struct iavf_adapter *adapter);
void iavf_map_queues(struct iavf_adapter *adapter);
int iavf_request_queues(struct iavf_adapter *adapter, int num);
void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
@@ -579,8 +571,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
enum iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter);
void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);

@@ -18,7 +18,6 @@
/* adminq functions */
enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
void iavf_adminq_init_ring_data(struct iavf_hw *hw);
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
@@ -33,8 +32,6 @@ bool iavf_asq_done(struct iavf_hw *hw);
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void iavf_idle_aq(struct iavf_hw *hw);
void iavf_resume_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);

@@ -207,6 +207,7 @@ enum ice_feature {
ICE_F_GNSS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -371,9 +372,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
u16 max_frame;
u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */

@@ -445,7 +445,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
rlan_ctx.rxmax = min_t(u32, ring->max_frame,
ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
@@ -541,8 +541,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
@@ -641,21 +639,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
* @ring: Rx ring to configure
*
* Determine the maximum frame size and Rx buffer length to use for a PF VSI.
* Set these in the associated Rx ring structure.
*/
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
vsi->rx_buf_len = ICE_RXBUF_1664;
ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
ring->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
vsi->rx_buf_len = ICE_RXBUF_3072;
ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
ring->rx_buf_len = ICE_RXBUF_3072;
}
}
@@ -670,15 +672,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
u16 i;
if (vsi->type == ICE_VSI_VF)
goto setup_rings;
ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
ice_for_each_rxq(vsi, i) {
int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
struct ice_rx_ring *ring = vsi->rx_rings[i];
int err;
if (vsi->type != ICE_VSI_VF)
ice_vsi_cfg_frame_size(vsi, ring);
err = ice_vsi_cfg_rxq(ring);
if (err)
return err;
}

@@ -60,11 +60,6 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
static inline void
ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
return 0;
}
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
return DEVLINK_ESWITCH_MODE_LEGACY;

@@ -4716,6 +4716,81 @@ static void ice_get_fec_stats(struct net_device *netdev,
pi->lport, err);
}
#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
ETH_RESET_SHARED_SHIFT)
#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
(ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
(ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
/**
* ice_ethtool_reset - triggers a given type of reset
* @dev: network interface device structure
* @flags: set of reset flags
*
* Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
*/
static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back;
enum ice_reset_req reset;
switch (*flags) {
case ICE_ETHTOOL_CORER:
reset = ICE_RESET_CORER;
break;
case ICE_ETHTOOL_GLOBR:
reset = ICE_RESET_GLOBR;
break;
case ICE_ETHTOOL_PFR:
reset = ICE_RESET_PFR;
break;
default:
netdev_info(dev, "Unsupported set of ethtool flags");
return -EOPNOTSUPP;
}
ice_schedule_reset(pf, reset);
*flags = 0;
return 0;
}
/**
* ice_repr_ethtool_reset - triggers a VF reset
* @dev: network interface device structure
* @flags: set of reset flags
*
* Return: 0 on success,
* -EOPNOTSUPP when using unsupported set of flags
* -EBUSY when VF is not ready for reset.
*/
static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
{
struct ice_repr *repr = ice_netdev_to_repr(dev);
struct ice_vf *vf;
if (repr->type != ICE_REPR_TYPE_VF ||
*flags != ICE_ETHTOOL_VFR)
return -EOPNOTSUPP;
vf = repr->vf;
if (ice_check_vf_ready_for_cfg(vf))
return -EBUSY;
*flags = 0;
return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
}
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4752,6 +4827,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.nway_reset = ice_nway_reset,
.get_pauseparam = ice_get_pauseparam,
.set_pauseparam = ice_set_pauseparam,
.reset = ice_ethtool_reset,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
@@ -4804,6 +4880,7 @@ static const struct ethtool_ops ice_ethtool_repr_ops = {
.get_strings = ice_repr_get_strings,
.get_ethtool_stats = ice_repr_get_ethtool_stats,
.get_sset_count = ice_repr_get_sset_count,
.reset = ice_repr_ethtool_reset,
};
/**

@@ -23,9 +23,6 @@ int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool

@@ -539,5 +539,8 @@
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
#endif /* _ICE_HW_AUTOGEN_H_ */

@@ -3880,6 +3880,9 @@ void ice_init_feature_support(struct ice_pf *pf)
default:
break;
}
if (pf->hw.mac_type == ICE_MAC_E830)
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
}
/**

@@ -88,8 +88,6 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);

@@ -1563,12 +1563,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
data.num_msg_proc = i;
data.num_pending_arq = pending;
data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
ice_vc_process_vf_msg(pf, &event, NULL);
ice_mbx_vf_dec_trig_e830(hw, &event);
} else {
u16 val = hw->mailboxq.num_rq_entries;
ice_vc_process_vf_msg(pf, &event, &data);
data.max_num_msgs_mbx = val;
val = ICE_MBX_OVERFLOW_WATERMARK;
data.async_watermark_val = val;
data.num_msg_proc = i;
data.num_pending_arq = pending;
ice_vc_process_vf_msg(pf, &event, &data);
}
break;
case ice_aqc_opc_fw_logs_event:
ice_get_fwlog_data(pf, &event);
@@ -4099,7 +4107,11 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
ice_mbx_init_snapshot(&pf->hw);
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
ICE_MBX_OVERFLOW_WATERMARK);
else
ice_mbx_init_snapshot(&pf->hw);
xa_init(&pf->dyn_ports);
xa_init(&pf->sf_nums);
@@ -6537,8 +6549,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_HW_TC) {
bool ena = !!(features & NETIF_F_HW_TC);
ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
}
if (changed & NETIF_F_LOOPBACK)

@@ -2857,10 +2857,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
return err;
}
if (err)
goto err_unlock;
/* Write the initial Time value to PHY and LAN using the cached PHC
* time before the reset and time difference between stopping and
@@ -2873,10 +2871,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ts = ktime_to_timespec64(ktime_get_real());
}
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
return err;
}
if (err)
goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
@@ -2900,6 +2896,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ice_ptp_enable_all_extts(pf);
return 0;
err_unlock:
ice_ptp_unlock(hw);
return err;
}
/**
@@ -3032,18 +3032,14 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
}
if (err)
goto err_unlock;
ts = ktime_to_timespec64(ktime_get_real());
/* Write the initial Time value to PHY and LAN */
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
}
if (err)
goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
@@ -3063,6 +3059,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
pf->ptp.clock = NULL;
err_exit:
return err;
err_unlock:
ice_ptp_unlock(hw);
return err;
}
/**

@@ -405,7 +405,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
struct dpll_pin_frequency *
@@ -423,8 +422,6 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);

@@ -194,7 +194,8 @@ void ice_free_vfs(struct ice_pf *pf)
}
/* clear malicious info since the VF is getting released */
list_del(&vf->mbx_info.list_entry);
if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
list_del(&vf->mbx_info.list_entry);
mutex_unlock(&vf->cfg_lock);
}

@@ -359,8 +359,9 @@ struct ice_rx_ring {
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)

@@ -154,7 +154,6 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
}
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);

@@ -709,6 +709,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
return 0;
}
/**
* ice_reset_vf_mbx_cnt - reset VF mailbox message count
* @vf: pointer to the VF structure
*
* This function clears the VF mailbox message count, and should be called on
* VF reset.
*/
static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
else
ice_mbx_clear_malvf(&vf->mbx_info);
}
/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
@@ -735,7 +752,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
/* clear all malicious info if the VFs are getting reset */
ice_for_each_vf(pf, bkt, vf)
ice_mbx_clear_malvf(&vf->mbx_info);
ice_reset_vf_mbx_cnt(vf);
/* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
@@ -951,7 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
ice_reset_vf_mbx_cnt(vf);
out_unlock:
if (lag && lag->bonded && lag->primary &&
@@ -1004,7 +1021,10 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
ice_vf_fdir_init(vf);
/* Initialize mailbox info for this VF */
ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
else
ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
mutex_init(&vf->cfg_lock);
}

@@ -210,6 +210,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
return 0;
}
/**
* ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
* @hw: pointer to the HW struct
* @event: pointer to the control queue receive event
*
* This function triggers to decrement the counter
* MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
* the buffers at the PF mailbox queue.
*/
void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
const struct ice_rq_event_info *event)
{
u16 vfid = le16_to_cpu(event->desc.retval);
wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
}
/**
* ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
* @hw: pointer to the HW struct
* @vf_id: VF ID in the PF space
*
* This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
* be called when a VF is created and on VF reset.
*/
void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
{
u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
}
/**
* ice_mbx_vf_state_handler - Handle states of the overflow algorithm
* @hw: pointer to the HW struct
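
Taken together, a sketch of the E830 flow these helpers implement
(simplified; registers and calls as they appear in the hunks above):

	/* PF init: program the in-flight VF message threshold once */
	wr32(hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
	     ICE_MBX_OVERFLOW_WATERMARK);

	/* after each VF message is processed: trigger the HW decrement */
	ice_mbx_vf_dec_trig_e830(hw, &event);

	/* on VF creation/reset: clear the per-VF in-flight count */
	ice_mbx_vf_clear_cnt_e830(hw, vf->vf_id);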

@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
const struct ice_rq_event_info *event);
void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
struct ice_mbx_vf_info *vf_info, bool *report_malvf);
@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
{
}
static inline void
ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
const struct ice_rq_event_info *event)
{
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VF_MBX_H_ */

@@ -1715,8 +1715,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
if (qpi->txq.ring_len > 0) {
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
/* Disable any existing queue first */
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
@@ -1725,7 +1725,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
vf->vf_id, i);
vf->vf_id, q_idx);
goto error_param;
}
}
@@ -1733,39 +1733,37 @@
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
u32 rxdid;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
ring->dma = qpi->rxq.dma_ring_addr;
ring->count = qpi->rxq.ring_len;
if (qpi->rxq.crc_disable)
vsi->rx_rings[q_idx]->flags |=
ICE_RX_FLAGS_CRC_STRIP_DIS;
ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
else
vsi->rx_rings[q_idx]->flags &=
~ICE_RX_FLAGS_CRC_STRIP_DIS;
ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
goto error_param;
vsi->rx_buf_len = qpi->rxq.databuffer_size;
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
ring->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64)
goto error_param;
vsi->max_frame = qpi->rxq.max_pkt_size;
ring->max_frame = qpi->rxq.max_pkt_size;
/* add space for the port VLAN since the VF driver is
* not expected to account for it in the MTU
* calculation
*/
if (ice_vf_is_port_vlan_ena(vf))
vsi->max_frame += VLAN_HLEN;
ring->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
vf->vf_id, i);
vf->vf_id, q_idx);
goto error_param;
}
@@ -4009,8 +4007,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
* @event: pointer to the AQ event
* @mbxdata: information used to detect VF attempting mailbox overflow
*
* called from the common asq/arq handler to
* process request from VF
* Called from the common asq/arq handler to process request from VF. When this
* flow is used for devices with hardware VF to PF message queue overflow
* support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf
* check is skipped.
*/
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata)
@@ -4036,7 +4036,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
mutex_lock(&vf->cfg_lock);
/* Check if the VF is trying to overflow the mailbox */
if (ice_is_malicious_vf(vf, mbxdata))
if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
goto finish;
/* Check if VF is disabled. */

@@ -63,6 +63,5 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif

@@ -7,7 +7,6 @@
s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);