Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-09-11 (i40e, iavf)

This series contains updates to i40e and iavf drivers.

Andrii ensures all VSIs are cleaned up on remove in i40e.

Brett reworks the logic for setting promiscuous mode on iavf, which can
currently leave the device in an incorrect state.
---
v2:
 - Remove redundant i40e_vsi_free_q_vectors() and kfree() calls (patch 1)

v1: https://lore.kernel.org/netdev/20230905180521.887861-1-anthony.l.nguyen@intel.com/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2023-09-13 06:50:58 +01:00
commit 7e6cadf51a
30 changed files with 300 additions and 215 deletions
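
Before the per-file hunks, a condensed preview of the iavf rework: the four
REQUEST/RELEASE aq flags collapse into a single
IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE, a spinlock-protected
current_netdev_promisc_flags mirror is compared against netdev->flags, and
the virtchnl request is derived directly from the netdev bits. A minimal
userspace sketch of that mapping follows (flag values are stand-ins, not the
kernel's constants; the authoritative logic is in the iavf_set_promiscuous()
hunk below):

#include <stdio.h>

/* Stand-in values; the kernel's IFF_* and FLAG_VF_* constants differ. */
#define IFF_PROMISC               0x1
#define IFF_ALLMULTI              0x2
#define FLAG_VF_UNICAST_PROMISC   0x1
#define FLAG_VF_MULTICAST_PROMISC 0x2

/* Two netdev bits, but only three distinct requests: IFF_PROMISC implies
 * unicast+multicast promiscuous regardless of IFF_ALLMULTI.
 */
static unsigned int promisc_flags(unsigned int netdev_flags)
{
	if (netdev_flags & IFF_PROMISC)
		return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
	if (netdev_flags & IFF_ALLMULTI)
		return FLAG_VF_MULTICAST_PROMISC;
	return 0;
}

int main(void)
{
	unsigned int f;

	for (f = 0; f <= (IFF_PROMISC | IFF_ALLMULTI); f++)
		printf("netdev flags %#x -> vf flags %#x\n",
		       f, promisc_flags(f));
	return 0;
}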

View File

@ -266,6 +266,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
/* Serializes accesses to the FDB */
struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;

View File

@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
struct sja1105_dyn_cmd cmd = {};
int rc;
/* We don't _need_ to read the full entry, just the command area which
* is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
* buffer that contains the full entry too. Additionally, our API
* doesn't really know how many bytes into the buffer does the command
* area really begin. So just read back the whole entry.
*/
/* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
memset(cmd, 0, sizeof(*cmd));
ops->cmd_packing(packed_buf, cmd, UNPACK);
ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
return cmd->valid ? -EAGAIN : 0;
if (cmd.valid)
return -EAGAIN;
if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
if (check_errors && cmd.errors)
return -EINVAL;
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
*/
if (entry)
ops->entry_packing(packed_buf, entry, UNPACK);
return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
struct sja1105_dyn_cmd *cmd,
const struct sja1105_dynamic_table_ops *ops)
const struct sja1105_dynamic_table_ops *ops,
void *entry, bool check_valident,
bool check_errors)
{
int rc;
int err, rc;
return read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, cmd, ops);
err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
rc, rc != -EAGAIN,
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
false, priv, ops, entry, check_valident,
check_errors);
return err < 0 ? err : rc;
}
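
For context, read_poll_timeout() from <linux/iopoll.h> repeatedly invokes the
op with the trailing arguments, stores the result in rc, and stops once the
condition (rc != -EAGAIN) holds or the timeout expires; it returns 0 when the
condition was met and -ETIMEDOUT otherwise, which is why the wrapper above
returns err on timeout and the poll function's rc otherwise. A rough
userspace analogue of the combined behavior (sketch only; the real macro also
does ktime bookkeeping, an optional sleep before the first read, and one
final re-read at the deadline):

#include <errno.h>

/* poll_once() stands in for sja1105_dynamic_config_poll_valid(); do_sleep()
 * and elapsed_us() stand in for the kernel's timekeeping helpers.
 */
static int wait_complete(int (*poll_once)(void *ctx), void *ctx,
			 long sleep_us, long timeout_us,
			 void (*do_sleep)(long us), long (*elapsed_us)(void))
{
	long start = elapsed_us();
	int rc;

	for (;;) {
		rc = poll_once(ctx);
		if (rc != -EAGAIN)	/* condition met: 0 or a hard error */
			return rc;
		if (elapsed_us() - start > timeout_us)
			return -ETIMEDOUT;
		do_sleep(sleep_us);
	}
}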
/* Provides read access to the settings through the dynamic interface
@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
goto out;
if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
out:
mutex_unlock(&priv->dynamic_config_lock);
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
*/
if (entry)
ops->entry_packing(packed_buf, entry, UNPACK);
return 0;
return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
if (rc < 0) {
mutex_unlock(&priv->dynamic_config_lock);
return rc;
}
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
goto out;
cmd = (struct sja1105_dyn_cmd) {0};
ops->cmd_packing(packed_buf, &cmd, UNPACK);
if (cmd.errors)
return -EINVAL;
rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
out:
mutex_unlock(&priv->dynamic_config_lock);
return 0;
return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)

View File

@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
if (!vid) {
switch (db.type) {
@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
}
return priv->info->fdb_add_cmd(ds, port, addr, vid);
mutex_lock(&priv->fdb_lock);
rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
int rc;
mutex_lock(&priv->fdb_lock);
rc = __sja1105_fdb_del(ds, port, addr, vid, db);
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
/* We need to hide the FDB entry for unknown multicast */
if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
* only wants to see unicast
*/
if (is_multicast_ether_addr(macaddr))
continue;
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
};
int i;
mutex_lock(&priv->fdb_lock);
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
return;
break;
}
if (!(l2_lookup.destports & BIT(port)))
@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
return;
break;
}
}
mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@ -2273,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;
mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@ -2385,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
mutex_unlock(&priv->fdb_lock);
return rc;
}
@ -2954,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
int match;
int match, rc;
mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@ -2967,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
return -ENOSPC;
rc = -ENOSPC;
goto out;
}
if (flags.val & BR_MCAST_FLOOD)
@ -2975,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);
return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup[match].index,
&l2_lookup[match],
true);
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
l2_lookup[match].index,
&l2_lookup[match], true);
out:
mutex_unlock(&priv->fdb_lock);
return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@ -3348,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);

View File

@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
return -ENOMEM;
other_port = priv->ports[!port_priv->nr];
port_rules = adin1110_port_rules(port_priv, false, true);
port_rules = adin1110_port_rules(other_port, false, true);
eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,

View File

@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
ASP_RX_FILTER_BLK_CTRL);
}
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt)
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt)
{
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
if (j == *rule_cnt)
return -EMSGSIZE;
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
}
*rule_cnt = j;
return 0;
}
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
of_node_put(intf_node);
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);

View File

@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf,
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt);
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
u32 *rule_cnt);
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);

View File

@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = bcmasp_flow_get(intf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
cmd->data = NUM_NET_FILTERS;
break;
default:

View File

@ -16320,11 +16320,15 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_switch_branch_release(pf->veb[i]);
}
/* Now we can shutdown the PF's VSI, just before we kill
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
for (i = pf->num_alloc_vsi; i--;)
if (pf->vsi[i]) {
i40e_vsi_close(pf->vsi[i]);
i40e_vsi_release(pf->vsi[i]);
pf->vsi[i] = NULL;
}
i40e_cloud_filter_exit(pf);

View File

@ -298,8 +298,6 @@ struct iavf_adapter {
#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
#define IAVF_FLAG_PROMISC_ON BIT(13)
#define IAVF_FLAG_ALLMULTI_ON BIT(14)
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
@ -325,10 +323,7 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@ -365,6 +360,12 @@ struct iavf_adapter {
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
IAVF_EXTENDED_CAP_RECV_VLAN_V2)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
*/
spinlock_t current_netdev_promisc_flags_lock;
netdev_features_t current_netdev_promisc_flags;
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@ -551,7 +552,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);

View File

@ -1188,6 +1188,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
return 0;
}
/**
* iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
* @adapter: device specific adapter
*/
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
{
return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
(IFF_PROMISC | IFF_ALLMULTI);
}
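
A worked example of that check, with stand-in values (the kernel's IFF_*
constants differ):

/* Suppose the netdev was just made promiscuous while the driver last
 * applied allmulti only:
 *
 *   current_netdev_promisc_flags = IFF_ALLMULTI
 *   netdev->flags                = IFF_PROMISC | IFF_ALLMULTI
 *
 * XOR                          -> IFF_PROMISC  (only that bit differs)
 * & (IFF_PROMISC|IFF_ALLMULTI) -> nonzero      => a mode change is pending
 */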
/**
* iavf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
@ -1201,19 +1211,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
if (netdev->flags & IFF_PROMISC &&
!(adapter->flags & IAVF_FLAG_PROMISC_ON))
adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
else if (!(netdev->flags & IFF_PROMISC) &&
adapter->flags & IAVF_FLAG_PROMISC_ON)
adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
if (netdev->flags & IFF_ALLMULTI &&
!(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
else if (!(netdev->flags & IFF_ALLMULTI) &&
adapter->flags & IAVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
if (iavf_promiscuous_mode_changed(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}
/**
@ -2163,19 +2164,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
FLAG_VF_MULTICAST_PROMISC);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
return 0;
}
if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
(adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
iavf_set_promiscuous(adapter, 0);
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter);
return 0;
}
@ -4971,6 +4961,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->cloud_filter_list_lock);
spin_lock_init(&adapter->fdir_fltr_lock);
spin_lock_init(&adapter->adv_rss_lock);
spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);

View File

@ -936,14 +936,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
/**
* iavf_set_promiscuous
* @adapter: adapter structure
* @flags: bitmask to control unicast/multicast promiscuous.
*
* Request that the PF enable promiscuous mode for our VSI.
**/
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct virtchnl_promisc_info vpi;
int promisc_all;
unsigned int flags;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@ -952,35 +952,56 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
return;
}
promisc_all = FLAG_VF_UNICAST_PROMISC |
FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) {
adapter->flags |= IAVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
/* prevent changes to promiscuous flags */
spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
/* sanity check to prevent duplicate AQ calls */
if (!iavf_promiscuous_mode_changed(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
/* allow changes to promiscuous flags */
spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
return;
}
/* there are 2 bits, but only 3 states */
if (!(netdev->flags & IFF_PROMISC) &&
netdev->flags & IFF_ALLMULTI) {
/* State 1 - only multicast promiscuous mode enabled
* - !IFF_PROMISC && IFF_ALLMULTI
*/
flags = FLAG_VF_MULTICAST_PROMISC;
adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
} else if (!(netdev->flags & IFF_PROMISC) &&
!(netdev->flags & IFF_ALLMULTI)) {
/* State 2 - unicast/multicast promiscuous mode disabled
* - !IFF_PROMISC && !IFF_ALLMULTI
*/
flags = 0;
adapter->current_netdev_promisc_flags &=
~(IFF_PROMISC | IFF_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
} else {
/* State 3 - unicast/multicast promiscuous mode enabled
* - IFF_PROMISC && IFF_ALLMULTI
* - IFF_PROMISC && !IFF_ALLMULTI
*/
flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
adapter->current_netdev_promisc_flags |= IFF_PROMISC;
if (netdev->flags & IFF_ALLMULTI)
adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
else
adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
adapter->netdev->name);
}
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
if (!flags) {
if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
adapter->netdev->name);
}
}
/* allow changes to promiscuous flags */
spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;

View File

@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
if (loc == info->rule_cnt) {
ret = -EMSGSIZE;
break;
}
if (port->rfs_rules[i])
rules[loc++] = i;
}

View File

@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
}
#define NPA_MAX_BURST 16
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
int num_ptrs = 1;
dma_addr_t bufptr;
int num_ptrs = 1;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs = 1;
}
}
return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)

View File

@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
return weight;
}
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);

View File

@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
struct refill_work *work;
struct delayed_work *dwork;
work = &pfvf->refill_wrk[cq->cq_idx];
dwork = &work->pool_refill_work;
/* Schedule a task if no other task is running */
if (!cq->refill_task_sched) {
cq->refill_task_sched = true;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
}
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
return -ENOMEM;
}
return 0;
}
@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
static void otx2_pool_refill_task(struct work_struct *work)
{
struct otx2_cq_queue *cq;
struct otx2_pool *rbpool;
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
dma_addr_t bufptr;
int qidx;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
qidx = wrk - pfvf->refill_wrk;
cq = &pfvf->qset.cq[qidx];
rbpool = cq->rbpool;
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
* pointers else enable napi for this RQ.
*/
if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
struct delayed_work *dwork;
dwork = &wrk->pool_refill_work;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
} else {
cq->refill_task_sched = false;
}
return;
}
pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
local_bh_disable();
napi_schedule(wrk->napi);
local_bh_enable();
}
int otx2_config_nix_queues(struct otx2_nic *pfvf)

View File

@ -302,6 +302,7 @@ struct flr_work {
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
struct napi_struct *napi;
};
/* PTPv2 originTimestamp structure */
@ -370,7 +371,7 @@ struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};

View File

@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
netif_tx_disable(netdev);
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
devm_kfree(pf->dev, pf->refill_wrk);
otx2_free_hw_resources(pf);
otx2_free_cints(pf, pf->hw.cint_cnt);
otx2_disable_napi(pf);
@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
devm_kfree(pf->dev, pf->refill_wrk);
kfree(qset->sq);
kfree(qset->cq);

View File

@ -424,9 +424,10 @@ process_cqe:
return processed_cqe;
}
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
dma_addr_t bufptr;
while (cq->pool_ptrs) {
@ -435,6 +436,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
return cnt - cq->pool_ptrs;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@ -521,6 +524,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
@ -541,7 +545,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
if (rx_cq && rx_cq->pool_ptrs)
pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@ -561,9 +565,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
otx2_config_irq_coalescing(pfvf, i);
}
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
work = &pfvf->refill_wrk[cq->cq_idx];
dwork = &work->pool_refill_work;
/* Schedule a task if no other task is running */
if (!cq->refill_task_sched) {
work->napi = napi;
cq->refill_task_sched = true;
schedule_delayed_work(dwork,
msecs_to_jiffies(100));
}
} else {
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
}
}
return workdone;
}

View File

@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */

View File

@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@ -2186,7 +2186,8 @@ release_desc:
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;

View File

@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
pse_port = 1;
pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
pse_port = 2;
pse_port = PSE_GDM2_PORT;
else if (dev == eth->netdev[2])
pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;

View File

@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
if (!newckf)
return ERR_PTR(-ENOMEM);
goto err;
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
}
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
if (!newcaf)
return ERR_PTR(-ENOMEM);
goto err;
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
}
return duprule;
err:
list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
list_del(&ckf->ctrl.list);
kfree(ckf);
}
list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
list_del(&caf->ctrl.list);
kfree(caf);
}
kfree(duprule);
return ERR_PTR(-ENOMEM);
}
static void vcap_apply_width(u8 *dst, int width, int bytes)

View File

@ -2704,9 +2704,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
stmmac_tx_timer_arm(priv, queue);
flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
tx_q->txq_stats.tx_packets += tx_packets;
@ -2995,9 +2993,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 tx_coal_timer = priv->tx_coal_timer[queue];
if (!tx_coal_timer)
return;
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}

View File

@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
if (!budget)
return 0;
work_done = rx_bottom(tp, budget);
if (work_done < budget) {

View File

@ -594,6 +594,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
/* FIXME: */
netdev_warn_once(skb->dev, "VLAN not yet supported");
return -EINVAL;
}
frame->is_from_san = false;

View File

@ -355,14 +355,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1;
struct in_ifaddr *last_prim;
struct in_ifaddr __rcu **last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
ifa1 = rtnl_dereference(*ifap);
last_prim = rtnl_dereference(in_dev->ifa_list);
last_prim = ifap;
if (in_dev->dead)
goto no_promotions;
@ -376,7 +376,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
last_prim = &ifa->ifa_next;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
@ -440,9 +440,9 @@ no_promotions:
rcu_assign_pointer(prev_prom->ifa_next, next_sec);
last_sec = rtnl_dereference(last_prim->ifa_next);
last_sec = rtnl_dereference(*last_prim);
rcu_assign_pointer(promote->ifa_next, last_sec);
rcu_assign_pointer(last_prim->ifa_next, promote);
rcu_assign_pointer(*last_prim, promote);
}
promote->ifa_flags &= ~IFA_F_SECONDARY;

View File

@ -939,6 +939,8 @@ out_error:
if (head != kcm->seq_skb)
kfree_skb(head);
else if (copied)
kcm_tx_msg(head)->last_skb = skb;
err = sk_stream_error(sk, msg->msg_flags, err);

View File

@ -1662,6 +1662,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
struct smc_link_group *lgr, *n;
spin_lock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
struct smc_link *link;
@ -1680,6 +1681,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
if (link)
smc_llc_add_link_local(link);
}
spin_unlock_bh(&smc_lgr_list.lock);
}
/* link is down - switch connections to alternate link,

View File

@ -243,8 +243,9 @@ while (0)
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
typeof(_ini) i = (_ini); \
bool is_v2 = (i->smcd_version & SMC_V2); \
bool is_smcd = (i->is_smcd); \
u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
bool is_v2 = (version & SMC_V2); \
typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
if (is_v2 && is_smcd) \
this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \

View File

@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0
NR_FILES=32768
SAVED_NR_FILES=$(ulimit -n)
readonly NETNS="ns-$(mktemp -u XXXXXX)"
# default values
port=443
@ -36,21 +36,21 @@ while getopts "ha:p:64" opt; do
done
setup() {
ip netns add "${NETNS}"
ip -netns "${NETNS}" link add veth0 type veth peer name veth1
ip -netns "${NETNS}" link set lo up
ip -netns "${NETNS}" link set veth0 up
ip -netns "${NETNS}" link set veth1 up
if [[ "$use_v6" == true ]]; then
ip addr add $addr_v6 nodad dev eth0
ip -netns "${NETNS}" addr add $addr_v6 nodad dev veth0
else
ip addr add $addr_v4 dev lo
ip -netns "${NETNS}" addr add $addr_v4 dev lo
fi
ulimit -n $NR_FILES
}
cleanup() {
if [[ "$use_v6" == true ]]; then
ip addr del $addr_v6 dev eth0
else
ip addr del $addr_v4/32 dev lo
fi
ulimit -n $SAVED_NR_FILES
ip netns del "${NETNS}"
}
if [[ "$addr" != "" ]]; then
@ -59,8 +59,10 @@ if [[ "$addr" != "" ]]; then
fi
setup
if [[ "$use_v6" == true ]] ; then
./bind_bhash $port "ipv6" $addr_v6
ip netns exec "${NETNS}" sh -c \
"ulimit -n ${NR_FILES};./bind_bhash ${port} ipv6 ${addr_v6}"
else
./bind_bhash $port "ipv4" $addr_v4
ip netns exec "${NETNS}" sh -c \
"ulimit -n ${NR_FILES};./bind_bhash ${port} ipv4 ${addr_v4}"
fi
cleanup
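
Assuming the wrapper above is the bind_bhash selftest script (its filename is
not shown in this hunk), typical invocations after the netns conversion might
look like:

# hypothetical examples; -a/-p/-6/-4 per the getopts spec above
./bind_bhash.sh                      # IPv4 on the default address, port 443
./bind_bhash.sh -p 8443 -a 10.0.0.1  # custom port and IPv4 address
./bind_bhash.sh -6 -a 2001:db8::1    # IPv6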