Networking fixes for 5.17-rc8/final, including fixes from bluetooth and ipsec.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Merge tag 'net-5.17-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bluetooth, and ipsec.

  Current release - regressions:
   - Bluetooth: fix unbalanced unlock in set_device_flags()
   - Bluetooth: fix not processing all entries on cmd_sync_work, make
     connect with qualcomm and intel adapters reliable
   - Revert "xfrm: state and policy should fail if XFRMA_IF_ID 0"
   - xdp: xdp_mem_allocator can be NULL in trace_mem_connect()
   - eth: ice: fix race condition and deadlock during interface enslave

  Current release - new code bugs:
   - tipc: fix incorrect order of state message data sanity check

  Previous releases - regressions:
   - esp: fix possible buffer overflow in ESP transformation
   - dsa: unlock the rtnl_mutex when dsa_master_setup() fails
   - phy: meson-gxl: fix interrupt handling in forced mode
   - smsc95xx: ignore -ENODEV errors when device is unplugged

  Previous releases - always broken:
   - xfrm: fix tunnel mode fragmentation behavior
   - esp: fix inter address family tunneling on GSO
   - tipc: fix null-deref due to race when enabling bearer
   - sctp: fix kernel-infoleak for SCTP sockets
   - eth: macb: fix lost RX packet wakeup race in NAPI receive
   - eth: intel: stop disabling VFs due to PF error responses
   - eth: bcmgenet: don't claim WOL when its not available"

* tag 'net-5.17-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (50 commits)
  xdp: xdp_mem_allocator can be NULL in trace_mem_connect().
  ice: Fix race condition during interface enslave
  net: phy: meson-gxl: improve link-up behavior
  net: bcmgenet: Don't claim WOL when its not available
  net: arc_emac: Fix use after free in arc_mdio_probe()
  sctp: fix kernel-infoleak for SCTP sockets
  net: phy: correct spelling error of media in documentation
  net: phy: DP83822: clear MISR2 register to disable interrupts
  gianfar: ethtool: Fix refcount leak in gfar_get_ts_info
  selftests: pmtu.sh: Kill nettest processes launched in subshell.
  selftests: pmtu.sh: Kill tcpdump processes launched by subshell.
  NFC: port100: fix use-after-free in port100_send_complete
  net/mlx5e: SHAMPO, reduce TIR indication
  net/mlx5e: Lag, Only handle events from highest priority multipath entry
  net/mlx5: Fix offloading with ESWITCH_IPV4_TTL_MODIFY_ENABLE
  net/mlx5: Fix a race on command flush flow
  net/mlx5: Fix size field in bufferx_reg struct
  ax25: Fix NULL pointer dereference in ax25_kill_by_device
  net: marvell: prestera: Add missing of_node_put() in prestera_switch_set_base_mac_addr
  net: ethernet: lpc_eth: Handle error for clk_enable
  ...
commit 186d32bbf0
@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc)
 	}
 	/* Allocate memory for FIFOS */
 	/* the memory needs to be on a 32k boundary within the first 4G */
-	dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+	if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) {
+		printk(KERN_WARNING
+		       "HFC-PCI: No usable DMA configuration!\n");
+		return -EIO;
+	}
 	buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
 				    GFP_KERNEL);
 	/* We silently assume the address is okay if nonzero */
@@ -192,7 +192,7 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
 int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 {
 	int found = 0;
-	char *dup, *tok, *name, *args;
+	char *dup, *next, *tok, *name, *args;
 	struct dsp_element_entry *entry, *n;
 	struct dsp_pipeline_entry *pipeline_entry;
 	struct mISDN_dsp_element *elem;
@@ -203,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 	if (!list_empty(&pipeline->list))
 		_dsp_pipeline_destroy(pipeline);
 
-	dup = kstrdup(cfg, GFP_ATOMIC);
+	dup = next = kstrdup(cfg, GFP_ATOMIC);
 	if (!dup)
 		return 0;
-	while ((tok = strsep(&dup, "|"))) {
+	while ((tok = strsep(&next, "|"))) {
 		if (!strlen(tok))
 			continue;
 		name = strsep(&tok, "(");
@@ -2936,7 +2936,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port,
 
 	phylink_set_port_modes(mask);
 
-	if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
+	if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
 	    !phy_interface_mode_is_8023z(state->interface)) {
 		phylink_set(mask, 10baseT_Half);
 		phylink_set(mask, 10baseT_Full);
@@ -405,12 +405,12 @@ static int mcf8390_init(struct net_device *dev)
 static int mcf8390_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
-	struct resource *mem, *irq;
+	struct resource *mem;
 	resource_size_t msize;
-	int ret;
+	int ret, irq;
 
-	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (irq == NULL) {
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
 		dev_err(&pdev->dev, "no IRQ specified?\n");
 		return -ENXIO;
 	}
@@ -433,7 +433,7 @@ static int mcf8390_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	platform_set_drvdata(pdev, dev);
 
-	dev->irq = irq->start;
+	dev->irq = irq;
 	dev->base_addr = mem->start;
 
 	ret = mcf8390_init(dev);
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 {
 	struct arc_emac_mdio_bus_data *data = &priv->bus_data;
 	struct device_node *np = priv->dev->of_node;
+	const char *name = "Synopsys MII Bus";
 	struct mii_bus *bus;
 	int error;
 
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	priv->bus = bus;
 	bus->priv = priv;
 	bus->parent = priv->dev;
-	bus->name = "Synopsys MII Bus";
+	bus->name = name;
 	bus->read = &arc_mdio_read;
 	bus->write = &arc_mdio_write;
 	bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
 	if (error) {
 		mdiobus_free(bus);
 		return dev_err_probe(priv->dev, error,
-				     "cannot register MDIO bus %s\n", bus->name);
+				     "cannot register MDIO bus %s\n", name);
 	}
 
 	return 0;
@@ -40,6 +40,13 @@
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 
+	if (!device_can_wakeup(kdev)) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+		return;
+	}
+
 	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
 	wol->wolopts = priv->wolopts;
@@ -1573,7 +1573,14 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 
-		/* Packets received while interrupts were disabled */
+		/* RSR bits only seem to propagate to raise interrupts when
+		 * interrupts are enabled at the time, so if bits are already
+		 * set due to packets received while interrupts were disabled,
+		 * they will not cause another interrupt to be generated when
+		 * interrupts are re-enabled.
+		 * Check for this case here. This has been seen to happen
+		 * around 30% of the time under heavy network load.
+		 */
 		status = macb_readl(bp, RSR);
 		if (status) {
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1581,6 +1588,22 @@ static int macb_poll(struct napi_struct *napi, int budget)
 				napi_reschedule(napi);
 		} else {
 			queue_writel(queue, IER, bp->rx_intr_mask);
+
+			/* In rare cases, packets could have been received in
+			 * the window between the check above and re-enabling
+			 * interrupts. Therefore, a double-check is required
+			 * to avoid losing a wakeup. This can potentially race
+			 * with the interrupt handler doing the same actions
+			 * if an interrupt is raised just after enabling them,
+			 * but this should be harmless.
+			 */
+			status = macb_readl(bp, RSR);
+			if (unlikely(status)) {
+				queue_writel(queue, IDR, bp->rx_intr_mask);
+				if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+					queue_writel(queue, ISR, MACB_BIT(RCOMP));
+				napi_schedule(napi);
+			}
 		}
 	}
 
@@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev,
 	ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
 	if (ptp_node) {
 		ptp_dev = of_find_device_by_node(ptp_node);
+		of_node_put(ptp_node);
 		if (ptp_dev)
 			ptp = platform_get_drvdata(ptp_dev);
 	}
@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
 		vsi = pf->vsi[vf->lan_vsi_idx];
 		dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
 			 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
-		dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
-			 vf->num_mdd_events,
-			 vf->num_invalid_msgs,
-			 vf->num_valid_msgs);
+		dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+			 vf->num_mdd_events);
 	} else {
 		dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
 	}
@ -1917,19 +1917,17 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
||||
/***********************virtual channel routines******************/
|
||||
|
||||
/**
|
||||
* i40e_vc_send_msg_to_vf_ex
|
||||
* i40e_vc_send_msg_to_vf
|
||||
* @vf: pointer to the VF info
|
||||
* @v_opcode: virtual channel opcode
|
||||
* @v_retval: virtual channel return value
|
||||
* @msg: pointer to the msg buffer
|
||||
* @msglen: msg length
|
||||
* @is_quiet: true for not printing unsuccessful return values, false otherwise
|
||||
*
|
||||
* send msg to VF
|
||||
**/
|
||||
static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
|
||||
u32 v_retval, u8 *msg, u16 msglen,
|
||||
bool is_quiet)
|
||||
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
|
||||
u32 v_retval, u8 *msg, u16 msglen)
|
||||
{
|
||||
struct i40e_pf *pf;
|
||||
struct i40e_hw *hw;
|
||||
@ -1944,25 +1942,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
|
||||
hw = &pf->hw;
|
||||
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
|
||||
|
||||
/* single place to detect unsuccessful return values */
|
||||
if (v_retval && !is_quiet) {
|
||||
vf->num_invalid_msgs++;
|
||||
dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
|
||||
vf->vf_id, v_opcode, v_retval);
|
||||
if (vf->num_invalid_msgs >
|
||||
I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Number of invalid messages exceeded for VF %d\n",
|
||||
vf->vf_id);
|
||||
dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
|
||||
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
|
||||
}
|
||||
} else {
|
||||
vf->num_valid_msgs++;
|
||||
/* reset the invalid counter, if a valid message is received. */
|
||||
vf->num_invalid_msgs = 0;
|
||||
}
|
||||
|
||||
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
|
||||
msg, msglen, NULL);
|
||||
if (aq_ret) {
|
||||
@ -1975,23 +1954,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_vc_send_msg_to_vf
|
||||
* @vf: pointer to the VF info
|
||||
* @v_opcode: virtual channel opcode
|
||||
* @v_retval: virtual channel return value
|
||||
* @msg: pointer to the msg buffer
|
||||
* @msglen: msg length
|
||||
*
|
||||
* send msg to VF
|
||||
**/
|
||||
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
|
||||
u32 v_retval, u8 *msg, u16 msglen)
|
||||
{
|
||||
return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
|
||||
msg, msglen, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_vc_send_resp_to_vf
|
||||
* @vf: pointer to the VF info
|
||||
@ -2822,7 +2784,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
|
||||
* i40e_check_vf_permission
|
||||
* @vf: pointer to the VF info
|
||||
* @al: MAC address list from virtchnl
|
||||
* @is_quiet: set true for printing msg without opcode info, false otherwise
|
||||
*
|
||||
* Check that the given list of MAC addresses is allowed. Will return -EPERM
|
||||
* if any address in the list is not valid. Checks the following conditions:
|
||||
@ -2837,8 +2798,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
|
||||
* addresses might not be accurate.
|
||||
**/
|
||||
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
struct virtchnl_ether_addr_list *al,
|
||||
bool *is_quiet)
|
||||
struct virtchnl_ether_addr_list *al)
|
||||
{
|
||||
struct i40e_pf *pf = vf->pf;
|
||||
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
@ -2846,7 +2806,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
int mac2add_cnt = 0;
|
||||
int i;
|
||||
|
||||
*is_quiet = false;
|
||||
for (i = 0; i < al->num_elements; i++) {
|
||||
struct i40e_mac_filter *f;
|
||||
u8 *addr = al->list[i].addr;
|
||||
@ -2870,7 +2829,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
|
||||
*is_quiet = true;
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
@ -2921,7 +2879,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
|
||||
(struct virtchnl_ether_addr_list *)msg;
|
||||
struct i40e_pf *pf = vf->pf;
|
||||
struct i40e_vsi *vsi = NULL;
|
||||
bool is_quiet = false;
|
||||
i40e_status ret = 0;
|
||||
int i;
|
||||
|
||||
@ -2938,7 +2895,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
|
||||
*/
|
||||
spin_lock_bh(&vsi->mac_filter_hash_lock);
|
||||
|
||||
ret = i40e_check_vf_permission(vf, al, &is_quiet);
|
||||
ret = i40e_check_vf_permission(vf, al);
|
||||
if (ret) {
|
||||
spin_unlock_bh(&vsi->mac_filter_hash_lock);
|
||||
goto error_param;
|
||||
@ -2976,8 +2933,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
|
||||
|
||||
error_param:
|
||||
/* send the response to the VF */
|
||||
return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
|
||||
ret, NULL, 0, is_quiet);
|
||||
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
|
||||
ret, NULL, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -10,8 +10,6 @@
|
||||
|
||||
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
|
||||
|
||||
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
|
||||
|
||||
#define I40E_VLAN_PRIORITY_SHIFT 13
|
||||
#define I40E_VLAN_MASK 0xFFF
|
||||
#define I40E_PRIORITY_MASK 0xE000
|
||||
@ -92,9 +90,6 @@ struct i40e_vf {
|
||||
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
|
||||
u8 num_req_queues; /* num of requested qps */
|
||||
u64 num_mdd_events; /* num of mdd events detected */
|
||||
/* num of continuous malformed or invalid msgs detected */
|
||||
u64 num_invalid_msgs;
|
||||
u64 num_valid_msgs; /* num of valid msgs detected */
|
||||
|
||||
unsigned long vf_caps; /* vf's adv. capabilities */
|
||||
unsigned long vf_states; /* vf's runtime states */
|
||||
|
@ -288,6 +288,7 @@ struct iavf_adapter {
|
||||
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
|
||||
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
|
||||
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
|
||||
#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
|
||||
/* duplicates for common code */
|
||||
#define IAVF_FLAG_DCB_ENABLED 0
|
||||
/* flags for admin queue service task */
|
||||
|
@ -2120,7 +2120,7 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
|
||||
"Requested %d queues, but PF only gave us %d.\n",
|
||||
num_req_queues,
|
||||
adapter->vsi_res->num_queue_pairs);
|
||||
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
|
||||
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
|
||||
iavf_schedule_reset(adapter);
|
||||
|
||||
@ -2727,7 +2727,8 @@ static void iavf_reset_task(struct work_struct *work)
|
||||
err);
|
||||
adapter->aq_required = 0;
|
||||
|
||||
if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
|
||||
if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
|
||||
(adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
|
||||
err = iavf_reinit_interrupt_scheme(adapter);
|
||||
if (err)
|
||||
goto reset_err;
|
||||
@ -2799,12 +2800,13 @@ static void iavf_reset_task(struct work_struct *work)
|
||||
if (err)
|
||||
goto reset_err;
|
||||
|
||||
if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
|
||||
if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
|
||||
(adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
|
||||
err = iavf_request_traffic_irqs(adapter, netdev->name);
|
||||
if (err)
|
||||
goto reset_err;
|
||||
|
||||
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
|
||||
}
|
||||
|
||||
iavf_configure(adapter);
|
||||
@ -2819,6 +2821,9 @@ static void iavf_reset_task(struct work_struct *work)
|
||||
iavf_change_state(adapter, __IAVF_DOWN);
|
||||
wake_up(&adapter->down_waitqueue);
|
||||
}
|
||||
|
||||
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
|
||||
|
||||
mutex_unlock(&adapter->client_lock);
|
||||
mutex_unlock(&adapter->crit_lock);
|
||||
|
||||
|
@ -1834,6 +1834,22 @@ void iavf_request_reset(struct iavf_adapter *adapter)
|
||||
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_netdev_features_vlan_strip_set - update vlan strip status
|
||||
* @netdev: ptr to netdev being adjusted
|
||||
* @enable: enable or disable vlan strip
|
||||
*
|
||||
* Helper function to change vlan strip status in netdev->features.
|
||||
*/
|
||||
static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
|
||||
const bool enable)
|
||||
{
|
||||
if (enable)
|
||||
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
|
||||
else
|
||||
netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
|
||||
}
|
||||
|
||||
/**
|
||||
* iavf_virtchnl_completion
|
||||
* @adapter: adapter structure
|
||||
@ -2057,8 +2073,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
|
||||
}
|
||||
break;
|
||||
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
|
||||
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
|
||||
/* Vlan stripping could not be enabled by ethtool.
|
||||
* Disable it in netdev->features.
|
||||
*/
|
||||
iavf_netdev_features_vlan_strip_set(netdev, false);
|
||||
break;
|
||||
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
|
||||
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
|
||||
/* Vlan stripping could not be disabled by ethtool.
|
||||
* Enable it in netdev->features.
|
||||
*/
|
||||
iavf_netdev_features_vlan_strip_set(netdev, true);
|
||||
break;
|
||||
default:
|
||||
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
|
||||
@ -2312,6 +2338,20 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
|
||||
spin_unlock_bh(&adapter->adv_rss_lock);
|
||||
}
|
||||
break;
|
||||
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
|
||||
/* PF enabled vlan strip on this VF.
|
||||
* Update netdev->features if needed to be in sync with ethtool.
|
||||
*/
|
||||
if (!v_retval)
|
||||
iavf_netdev_features_vlan_strip_set(netdev, true);
|
||||
break;
|
||||
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
|
||||
/* PF disabled vlan strip on this VF.
|
||||
* Update netdev->features if needed to be in sync with ethtool.
|
||||
*/
|
||||
if (!v_retval)
|
||||
iavf_netdev_features_vlan_strip_set(netdev, false);
|
||||
break;
|
||||
default:
|
||||
if (adapter->current_op && (v_opcode != adapter->current_op))
|
||||
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
|
||||
|
@ -483,6 +483,7 @@ enum ice_pf_flags {
|
||||
ICE_FLAG_MDD_AUTO_RESET_VF,
|
||||
ICE_FLAG_LINK_LENIENT_MODE_ENA,
|
||||
ICE_FLAG_PLUG_AUX_DEV,
|
||||
ICE_FLAG_MTU_CHANGED,
|
||||
ICE_PF_FLAGS_NBITS /* must be last */
|
||||
};
|
||||
|
||||
@ -897,7 +898,16 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
|
||||
*/
|
||||
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
|
||||
{
|
||||
ice_unplug_aux_dev(pf);
|
||||
/* We can directly unplug aux device here only if the flag bit
|
||||
* ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
|
||||
* could race with ice_plug_aux_dev() called from
|
||||
* ice_service_task(). In this case we only clear that bit now and
|
||||
* aux device will be unplugged later once ice_plug_aux_device()
|
||||
* called from ice_service_task() finishes (see ice_service_task()).
|
||||
*/
|
||||
if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
|
||||
ice_unplug_aux_dev(pf);
|
||||
|
||||
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
|
||||
clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
|
||||
}
|
||||
|
@ -2298,7 +2298,7 @@ ice_set_link_ksettings(struct net_device *netdev,
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
curr_link_speed = pi->phy.link_info.link_speed;
|
||||
curr_link_speed = pi->phy.curr_user_speed_req;
|
||||
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
|
||||
|
||||
/* If speed didn't get set, set it to what it currently is.
|
||||
|
@ -2255,9 +2255,30 @@ static void ice_service_task(struct work_struct *work)
|
||||
return;
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
|
||||
if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
|
||||
/* Plug aux device per request */
|
||||
ice_plug_aux_dev(pf);
|
||||
|
||||
/* Mark plugging as done but check whether unplug was
|
||||
* requested during ice_plug_aux_dev() call
|
||||
* (e.g. from ice_clear_rdma_cap()) and if so then
|
||||
* plug aux device.
|
||||
*/
|
||||
if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
|
||||
ice_unplug_aux_dev(pf);
|
||||
}
|
||||
|
||||
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
|
||||
struct iidc_event *event;
|
||||
|
||||
event = kzalloc(sizeof(*event), GFP_KERNEL);
|
||||
if (event) {
|
||||
set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
|
||||
ice_send_event_to_aux(pf, event);
|
||||
kfree(event);
|
||||
}
|
||||
}
|
||||
|
||||
ice_clean_adminq_subtask(pf);
|
||||
ice_check_media_subtask(pf);
|
||||
ice_check_for_hang_subtask(pf);
|
||||
@ -3023,7 +3044,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
|
||||
struct iidc_event *event;
|
||||
|
||||
ena_mask &= ~ICE_AUX_CRIT_ERR;
|
||||
event = kzalloc(sizeof(*event), GFP_KERNEL);
|
||||
event = kzalloc(sizeof(*event), GFP_ATOMIC);
|
||||
if (event) {
|
||||
set_bit(IIDC_EVENT_CRIT_ERR, event->type);
|
||||
/* report the entire OICR value to AUX driver */
|
||||
@ -6822,7 +6843,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
struct iidc_event *event;
|
||||
u8 count = 0;
|
||||
int err = 0;
|
||||
|
||||
@ -6857,14 +6877,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
event = kzalloc(sizeof(*event), GFP_KERNEL);
|
||||
if (!event)
|
||||
return -ENOMEM;
|
||||
|
||||
set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
|
||||
ice_send_event_to_aux(pf, event);
|
||||
clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
|
||||
|
||||
netdev->mtu = (unsigned int)new_mtu;
|
||||
|
||||
/* if VSI is up, bring it down and then back up */
|
||||
@ -6872,21 +6884,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
err = ice_down(vsi);
|
||||
if (err) {
|
||||
netdev_err(netdev, "change MTU if_down err %d\n", err);
|
||||
goto event_after;
|
||||
return err;
|
||||
}
|
||||
|
||||
err = ice_up(vsi);
|
||||
if (err) {
|
||||
netdev_err(netdev, "change MTU if_up err %d\n", err);
|
||||
goto event_after;
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
|
||||
event_after:
|
||||
set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
|
||||
ice_send_event_to_aux(pf, event);
|
||||
kfree(event);
|
||||
set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -2182,24 +2182,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
|
||||
/* single place to detect unsuccessful return values */
|
||||
if (v_retval) {
|
||||
vf->num_inval_msgs++;
|
||||
dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
|
||||
v_opcode, v_retval);
|
||||
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
|
||||
dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
|
||||
vf->vf_id);
|
||||
dev_err(dev, "Use PF Control I/F to enable the VF\n");
|
||||
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
|
||||
return -EIO;
|
||||
}
|
||||
} else {
|
||||
vf->num_valid_msgs++;
|
||||
/* reset the invalid counter, if a valid message is received. */
|
||||
vf->num_inval_msgs = 0;
|
||||
}
|
||||
|
||||
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
|
||||
msg, msglen, NULL);
|
||||
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
|
||||
|
@ -14,7 +14,6 @@
|
||||
#define ICE_MAX_MACADDR_PER_VF 18
|
||||
|
||||
/* Malicious Driver Detection */
|
||||
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
|
||||
#define ICE_MDD_EVENTS_THRESHOLD 30
|
||||
|
||||
/* Static VF transaction/status register def */
|
||||
@ -134,8 +133,6 @@ struct ice_vf {
|
||||
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
|
||||
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
|
||||
|
||||
u64 num_inval_msgs; /* number of continuous invalid msgs */
|
||||
u64 num_valid_msgs; /* number of valid msgs detected */
|
||||
unsigned long vf_caps; /* VF's adv. capabilities */
|
||||
u8 num_req_qs; /* num of queue pairs requested by VF */
|
||||
u16 num_mac;
|
||||
|
@@ -260,9 +260,9 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 
 	if (ctl & LTQ_DMA_EOP) {
 		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
-		netif_receive_skb(ch->skb_head);
 		net_dev->stats.rx_packets++;
 		net_dev->stats.rx_bytes += ch->skb_head->len;
+		netif_receive_skb(ch->skb_head);
 		ch->skb_head = NULL;
 		ch->skb_tail = NULL;
 		ret = XRX200_DMA_PACKET_COMPLETE;
@@ -554,6 +554,7 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
 		dev_info(prestera_dev(sw), "using random base mac address\n");
 	}
+	of_node_put(base_mac_np);
 	of_node_put(np);
 
 	return prestera_hw_switch_mac_set(sw, sw->base_mac);
 }
@ -131,11 +131,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
|
||||
|
||||
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
lockdep_assert_held(&cmd->alloc_lock);
|
||||
set_bit(idx, &cmd->bitmask);
|
||||
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
|
||||
}
|
||||
|
||||
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
|
||||
@ -145,17 +142,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
|
||||
|
||||
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
|
||||
{
|
||||
struct mlx5_cmd *cmd = ent->cmd;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
if (!refcount_dec_and_test(&ent->refcnt))
|
||||
return;
|
||||
goto out;
|
||||
|
||||
if (ent->idx >= 0) {
|
||||
struct mlx5_cmd *cmd = ent->cmd;
|
||||
|
||||
cmd_free_index(cmd, ent->idx);
|
||||
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
|
||||
}
|
||||
|
||||
cmd_free_ent(ent);
|
||||
out:
|
||||
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
|
||||
}
|
||||
|
||||
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
|
||||
|
@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
|
||||
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
|
||||
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
|
||||
break;
|
||||
case MLX5E_PACKET_MERGE_SHAMPO:
|
||||
MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mlx5e_safe_switch_params(priv, &new_params,
|
||||
mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
|
||||
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
|
||||
out:
|
||||
mutex_unlock(&priv->state_lock);
|
||||
return err;
|
||||
|
@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
|
||||
return;
|
||||
}
|
||||
|
||||
/* Handle multipath entry with lower priority value */
|
||||
if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
|
||||
return;
|
||||
|
||||
/* Handle add/replace event */
|
||||
nhs = fib_info_num_path(fi);
|
||||
if (nhs == 1) {
|
||||
@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
|
||||
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
|
||||
|
||||
if (i < 0)
|
||||
i = MLX5_LAG_NORMAL_AFFINITY;
|
||||
else
|
||||
++i;
|
||||
return;
|
||||
|
||||
i++;
|
||||
mlx5_lag_set_port_affinity(ldev, i);
|
||||
}
|
||||
|
||||
mp->mfi = fi;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -121,9 +121,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
|
||||
|
||||
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
|
||||
{
|
||||
if (!mlx5_chains_prios_supported(chains))
|
||||
return 1;
|
||||
|
||||
if (mlx5_chains_ignore_flow_level_supported(chains))
|
||||
return UINT_MAX;
|
||||
|
||||
|
@ -1471,6 +1471,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *ndev = platform_get_drvdata(pdev);
|
||||
struct netdata_local *pldat;
|
||||
int ret;
|
||||
|
||||
if (device_may_wakeup(&pdev->dev))
|
||||
disable_irq_wake(ndev->irq);
|
||||
@ -1480,7 +1481,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
|
||||
pldat = netdev_priv(ndev);
|
||||
|
||||
/* Enable interface clock */
|
||||
clk_enable(pldat->clk);
|
||||
ret = clk_enable(pldat->clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Reset and initialize */
|
||||
__lpc_eth_reset(pldat);
|
||||
|
@ -3806,11 +3806,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
|
||||
return found;
|
||||
}
|
||||
|
||||
static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
|
||||
u16 vfid,
|
||||
struct qed_mcp_link_params *p_params,
|
||||
struct qed_mcp_link_state *p_link,
|
||||
struct qed_mcp_link_capabilities *p_caps)
|
||||
static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
|
||||
u16 vfid,
|
||||
struct qed_mcp_link_params *p_params,
|
||||
struct qed_mcp_link_state *p_link,
|
||||
struct qed_mcp_link_capabilities *p_caps)
|
||||
{
|
||||
struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
|
||||
vfid,
|
||||
@ -3818,7 +3818,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
|
||||
struct qed_bulletin_content *p_bulletin;
|
||||
|
||||
if (!p_vf)
|
||||
return;
|
||||
return -EINVAL;
|
||||
|
||||
p_bulletin = p_vf->bulletin.p_virt;
|
||||
|
||||
@ -3828,6 +3828,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
|
||||
__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
|
||||
if (p_caps)
|
||||
__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -4686,6 +4687,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
|
||||
struct qed_public_vf_info *vf_info;
|
||||
struct qed_mcp_link_state link;
|
||||
u32 tx_rate;
|
||||
int ret;
|
||||
|
||||
/* Sanitize request */
|
||||
if (IS_VF(cdev))
|
||||
@ -4699,7 +4701,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
|
||||
|
||||
vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
|
||||
|
||||
qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
|
||||
ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Fill information about VF */
|
||||
ivi->vf = vf_id;
|
||||
|
@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
|
||||
p_iov->bulletin.size,
|
||||
&p_iov->bulletin.phys,
|
||||
GFP_KERNEL);
|
||||
if (!p_iov->bulletin.p_virt)
|
||||
goto free_pf2vf_reply;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
|
||||
"VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
|
||||
p_iov->bulletin.p_virt,
|
||||
@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
|
||||
|
||||
return rc;
|
||||
|
||||
free_pf2vf_reply:
|
||||
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
|
||||
sizeof(union pfvf_tlvs),
|
||||
p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
|
||||
free_vf2pf_request:
|
||||
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
|
||||
sizeof(union vfpf_tlvs),
|
||||
|
@ -3146,7 +3146,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
|
||||
if (err) {
|
||||
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
|
||||
"aborting.\n");
|
||||
goto err_out_iounmap;
|
||||
goto err_out_free_coherent;
|
||||
}
|
||||
|
||||
pci_set_drvdata(pdev, hp);
|
||||
@ -3179,6 +3179,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
|
||||
|
||||
return 0;
|
||||
|
||||
err_out_free_coherent:
|
||||
dma_free_coherent(hp->dma_dev, PAGE_SIZE,
|
||||
hp->happy_block, hp->hblock_dvma);
|
||||
|
||||
err_out_iounmap:
|
||||
iounmap(hp->gregs);
|
||||
|
||||
|
@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts)
|
||||
for (i = 0; i < CPTS_MAX_EVENTS; i++)
|
||||
list_add(&cpts->pool_data[i].list, &cpts->pool);
|
||||
|
||||
clk_enable(cpts->refclk);
|
||||
err = clk_enable(cpts->refclk);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
cpts_write32(cpts, CPTS_EN, control);
|
||||
cpts_write32(cpts, TS_PEND_EN, int_enable);
|
||||
|
@ -1183,7 +1183,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
|
||||
if (rc) {
|
||||
dev_err(dev,
|
||||
"Cannot register network device, aborting\n");
|
||||
goto error;
|
||||
goto put_node;
|
||||
}
|
||||
|
||||
dev_info(dev,
|
||||
@ -1191,6 +1191,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
|
||||
(unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
|
||||
return 0;
|
||||
|
||||
put_node:
|
||||
of_node_put(lp->phy_node);
|
||||
error:
|
||||
free_netdev(ndev);
|
||||
return rc;
|
||||
|
@@ -274,7 +274,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
 		if (err < 0)
 			return err;
 
-		err = phy_write(phydev, MII_DP83822_MISR1, 0);
+		err = phy_write(phydev, MII_DP83822_MISR2, 0);
 		if (err < 0)
 			return err;
 
@ -30,8 +30,12 @@
|
||||
#define INTSRC_LINK_DOWN BIT(4)
|
||||
#define INTSRC_REMOTE_FAULT BIT(5)
|
||||
#define INTSRC_ANEG_COMPLETE BIT(6)
|
||||
#define INTSRC_ENERGY_DETECT BIT(7)
|
||||
#define INTSRC_MASK 30
|
||||
|
||||
#define INT_SOURCES (INTSRC_LINK_DOWN | INTSRC_ANEG_COMPLETE | \
|
||||
INTSRC_ENERGY_DETECT)
|
||||
|
||||
#define BANK_ANALOG_DSP 0
|
||||
#define BANK_WOL 1
|
||||
#define BANK_BIST 3
|
||||
@ -200,7 +204,6 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
|
||||
|
||||
static int meson_gxl_config_intr(struct phy_device *phydev)
|
||||
{
|
||||
u16 val;
|
||||
int ret;
|
||||
|
||||
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
|
||||
@ -209,16 +212,9 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
val = INTSRC_ANEG_PR
|
||||
| INTSRC_PARALLEL_FAULT
|
||||
| INTSRC_ANEG_LP_ACK
|
||||
| INTSRC_LINK_DOWN
|
||||
| INTSRC_REMOTE_FAULT
|
||||
| INTSRC_ANEG_COMPLETE;
|
||||
ret = phy_write(phydev, INTSRC_MASK, val);
|
||||
ret = phy_write(phydev, INTSRC_MASK, INT_SOURCES);
|
||||
} else {
|
||||
val = 0;
|
||||
ret = phy_write(phydev, INTSRC_MASK, val);
|
||||
ret = phy_write(phydev, INTSRC_MASK, 0);
|
||||
|
||||
/* Ack any pending IRQ */
|
||||
ret = meson_gxl_ack_interrupt(phydev);
|
||||
@ -237,10 +233,23 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
irq_status &= INT_SOURCES;
|
||||
|
||||
if (irq_status == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
phy_trigger_machine(phydev);
|
||||
/* Aneg-complete interrupt is used for link-up detection */
|
||||
if (phydev->autoneg == AUTONEG_ENABLE &&
|
||||
irq_status == INTSRC_ENERGY_DETECT)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
/* Give PHY some time before MAC starts sending data. This works
|
||||
* around an issue where network doesn't come up properly.
|
||||
*/
|
||||
if (!(irq_status & INTSRC_LINK_DOWN))
|
||||
phy_queue_state_machine(phydev, msecs_to_jiffies(100));
|
||||
else
|
||||
phy_trigger_machine(phydev);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
|
||||
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
|
||||
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
0, index, &buf, 4);
|
||||
if (unlikely(ret < 0)) {
|
||||
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
|
||||
index, ret);
|
||||
if (ret < 0) {
|
||||
if (ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
|
||||
index, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
|
||||
ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
|
||||
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
0, index, &buf, 4);
|
||||
if (unlikely(ret < 0))
|
||||
if (ret < 0 && ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
|
||||
index, ret);
|
||||
|
||||
@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
|
||||
do {
|
||||
ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
|
||||
if (ret < 0) {
|
||||
/* Ignore -ENODEV error during disconnect() */
|
||||
if (ret == -ENODEV)
|
||||
return 0;
|
||||
netdev_warn(dev->net, "Error reading MII_ACCESS\n");
|
||||
return ret;
|
||||
}
|
||||
@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
|
||||
addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
|
||||
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
|
||||
if (ret < 0) {
|
||||
netdev_warn(dev->net, "Error writing MII_ADDR\n");
|
||||
if (ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Error writing MII_ADDR\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
|
||||
|
||||
ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
|
||||
if (ret < 0) {
|
||||
netdev_warn(dev->net, "Error reading MII_DATA\n");
|
||||
if (ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Error reading MII_DATA\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
|
||||
|
||||
done:
|
||||
mutex_unlock(&dev->phy_mutex);
|
||||
|
||||
/* Ignore -ENODEV error during disconnect() */
|
||||
if (ret == -ENODEV)
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
|
||||
val = regval;
|
||||
ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
|
||||
if (ret < 0) {
|
||||
netdev_warn(dev->net, "Error writing MII_DATA\n");
|
||||
if (ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Error writing MII_DATA\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
|
||||
addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
|
||||
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
|
||||
if (ret < 0) {
|
||||
netdev_warn(dev->net, "Error writing MII_ADDR\n");
|
||||
if (ret != -ENODEV)
|
||||
netdev_warn(dev->net, "Error writing MII_ADDR\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
|
@ -1612,7 +1612,9 @@ static int port100_probe(struct usb_interface *interface,
|
||||
nfc_digital_free_device(dev->nfc_digital_dev);
|
||||
|
||||
error:
|
||||
usb_kill_urb(dev->in_urb);
|
||||
usb_free_urb(dev->in_urb);
|
||||
usb_kill_urb(dev->out_urb);
|
||||
usb_free_urb(dev->out_urb);
|
||||
usb_put_dev(dev->udev);
|
||||
|
||||
|
@ -3434,7 +3434,6 @@ enum {
|
||||
enum {
|
||||
MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
|
||||
MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
|
||||
MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2),
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -9900,8 +9899,8 @@ struct mlx5_ifc_bufferx_reg_bits {
|
||||
u8 reserved_at_0[0x6];
|
||||
u8 lossy[0x1];
|
||||
u8 epsb[0x1];
|
||||
u8 reserved_at_8[0xc];
|
||||
u8 size[0xc];
|
||||
u8 reserved_at_8[0x8];
|
||||
u8 size[0x10];
|
||||
|
||||
u8 xoff_threshold[0x10];
|
||||
u8 xon_threshold[0x10];
|
||||
|
@ -4602,6 +4602,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
|
||||
|
||||
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
|
||||
netdev_features_t features, bool tx_path);
|
||||
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
|
||||
netdev_features_t features, __be16 type);
|
||||
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
|
||||
netdev_features_t features);
|
||||
|
||||
|
@ -87,8 +87,8 @@ extern const int phy_10gbit_features_array[1];
|
||||
*
|
||||
* @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
|
||||
* @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
|
||||
* @PHY_INTERFACE_MODE_MII: Median-independent interface
|
||||
* @PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface
|
||||
* @PHY_INTERFACE_MODE_MII: Media-independent interface
|
||||
* @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
|
||||
* @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
|
||||
* @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
|
||||
* @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
|
||||
|
@ -4,6 +4,8 @@
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
|
||||
|
||||
struct ip_esp_hdr;
|
||||
|
||||
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
|
||||
|
@ -87,6 +87,13 @@ static void ax25_kill_by_device(struct net_device *dev)
|
||||
ax25_for_each(s, &ax25_list) {
|
||||
if (s->ax25_dev == ax25_dev) {
|
||||
sk = s->sk;
|
||||
if (!sk) {
|
||||
spin_unlock_bh(&ax25_list_lock);
|
||||
s->ax25_dev = NULL;
|
||||
ax25_disconnect(s, ENETUNREACH);
|
||||
spin_lock_bh(&ax25_list_lock);
|
||||
goto again;
|
||||
}
|
||||
sock_hold(sk);
|
||||
spin_unlock_bh(&ax25_list_lock);
|
||||
lock_sock(sk);
|
||||
|
@ -276,40 +276,37 @@ EXPORT_SYMBOL(__hci_cmd_sync_status);
|
||||
static void hci_cmd_sync_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
|
||||
struct hci_cmd_sync_work_entry *entry;
|
||||
hci_cmd_sync_work_func_t func;
|
||||
hci_cmd_sync_work_destroy_t destroy;
|
||||
void *data;
|
||||
|
||||
bt_dev_dbg(hdev, "");
|
||||
|
||||
mutex_lock(&hdev->cmd_sync_work_lock);
|
||||
entry = list_first_entry(&hdev->cmd_sync_work_list,
|
||||
struct hci_cmd_sync_work_entry, list);
|
||||
if (entry) {
|
||||
list_del(&entry->list);
|
||||
func = entry->func;
|
||||
data = entry->data;
|
||||
destroy = entry->destroy;
|
||||
/* Dequeue all entries and run them */
|
||||
while (1) {
|
||||
struct hci_cmd_sync_work_entry *entry;
|
||||
|
||||
mutex_lock(&hdev->cmd_sync_work_lock);
|
||||
entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
|
||||
struct hci_cmd_sync_work_entry,
|
||||
list);
|
||||
if (entry)
|
||||
list_del(&entry->list);
|
||||
mutex_unlock(&hdev->cmd_sync_work_lock);
|
||||
|
||||
if (!entry)
|
||||
break;
|
||||
|
||||
bt_dev_dbg(hdev, "entry %p", entry);
|
||||
|
||||
if (entry->func) {
|
||||
int err;
|
||||
|
||||
hci_req_sync_lock(hdev);
|
||||
err = entry->func(hdev, entry->data);
|
||||
if (entry->destroy)
|
||||
entry->destroy(hdev, entry->data, err);
|
||||
hci_req_sync_unlock(hdev);
|
||||
}
|
||||
|
||||
kfree(entry);
|
||||
} else {
|
||||
func = NULL;
|
||||
data = NULL;
|
||||
destroy = NULL;
|
||||
}
|
||||
mutex_unlock(&hdev->cmd_sync_work_lock);
|
||||
|
||||
if (func) {
|
||||
int err;
|
||||
|
||||
hci_req_sync_lock(hdev);
|
||||
|
||||
err = func(hdev, data);
|
||||
|
||||
if (destroy)
|
||||
destroy(hdev, data, err);
|
||||
|
||||
hci_req_sync_unlock(hdev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4541,9 +4541,9 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
done:
|
||||
if (status == MGMT_STATUS_SUCCESS)
|
||||
device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
|
||||
supported_flags, current_flags);
|
||||
|
@ -92,6 +92,31 @@ void dev_remove_offload(struct packet_offload *po)
|
||||
}
|
||||
EXPORT_SYMBOL(dev_remove_offload);
|
||||
|
||||
/**
|
||||
* skb_eth_gso_segment - segmentation handler for ethernet protocols.
|
||||
* @skb: buffer to segment
|
||||
* @features: features for the output path (see dev->features)
|
||||
* @type: Ethernet Protocol ID
|
||||
*/
|
||||
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
|
||||
netdev_features_t features, __be16 type)
|
||||
{
|
||||
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
|
||||
struct packet_offload *ptype;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(ptype, &offload_base, list) {
|
||||
if (ptype->type == type && ptype->callbacks.gso_segment) {
|
||||
segs = ptype->callbacks.gso_segment(skb, features);
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return segs;
|
||||
}
|
||||
EXPORT_SYMBOL(skb_eth_gso_segment);
|
||||
|
||||
/**
|
||||
* skb_mac_gso_segment - mac layer segmentation handler.
|
||||
* @skb: buffer to segment
|
||||
|
@ -357,7 +357,8 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
|
||||
if (IS_ERR(xdp_alloc))
|
||||
return PTR_ERR(xdp_alloc);
|
||||
|
||||
trace_mem_connect(xdp_alloc, xdp_rxq);
|
||||
if (trace_mem_connect_enabled() && xdp_alloc)
|
||||
trace_mem_connect(xdp_alloc, xdp_rxq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1058,7 +1058,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
|
||||
static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
|
||||
{
|
||||
struct dsa_port *dp;
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
@ -1066,13 +1066,13 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
|
||||
if (dsa_port_is_cpu(dp)) {
|
||||
err = dsa_master_setup(dp->master, dp);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
rtnl_unlock();
|
||||
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
|
||||
|
@ -446,6 +446,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
|
||||
struct page *page;
|
||||
struct sk_buff *trailer;
|
||||
int tailen = esp->tailen;
|
||||
unsigned int allocsz;
|
||||
|
||||
/* this is non-NULL only with TCP/UDP Encapsulation */
|
||||
if (x->encap) {
|
||||
@ -455,6 +456,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
|
||||
return err;
|
||||
}
|
||||
|
||||
allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
|
||||
if (allocsz > ESP_SKB_FRAG_MAXSIZE)
|
||||
goto cow;
|
||||
|
||||
if (!skb_cloned(skb)) {
|
||||
if (tailen <= skb_tailroom(skb)) {
|
||||
nfrags = 1;
|
||||
|
@ -110,8 +110,7 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
|
||||
struct sk_buff *skb,
|
||||
netdev_features_t features)
|
||||
{
|
||||
__skb_push(skb, skb->mac_len);
|
||||
return skb_mac_gso_segment(skb, features);
|
||||
return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
|
||||
}
|
||||
|
||||
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
|
||||
@ -160,6 +159,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
|
||||
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
|
||||
}
|
||||
|
||||
if (proto == IPPROTO_IPV6)
|
||||
skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
|
||||
|
||||
__skb_pull(skb, skb_transport_offset(skb));
|
||||
ops = rcu_dereference(inet_offloads[proto]);
|
||||
if (likely(ops && ops->callbacks.gso_segment))
|
||||
|
@ -482,6 +482,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
|
||||
struct page *page;
|
||||
struct sk_buff *trailer;
|
||||
int tailen = esp->tailen;
|
||||
unsigned int allocsz;
|
||||
|
||||
if (x->encap) {
|
||||
int err = esp6_output_encap(x, skb, esp);
|
||||
@ -490,6 +491,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
|
||||
return err;
|
||||
}
|
||||
|
||||
allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
|
||||
if (allocsz > ESP_SKB_FRAG_MAXSIZE)
|
||||
goto cow;
|
||||
|
||||
if (!skb_cloned(skb)) {
|
||||
if (tailen <= skb_tailroom(skb)) {
|
||||
nfrags = 1;
|
||||
|
@ -145,8 +145,7 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
|
||||
struct sk_buff *skb,
|
||||
netdev_features_t features)
|
||||
{
|
||||
__skb_push(skb, skb->mac_len);
|
||||
return skb_mac_gso_segment(skb, features);
|
||||
return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
|
||||
}
|
||||
|
||||
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
|
||||
@ -199,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
|
||||
ipv6_skip_exthdr(skb, 0, &proto, &frag);
|
||||
}
|
||||
|
||||
if (proto == IPPROTO_IPIP)
|
||||
skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
|
||||
|
||||
__skb_pull(skb, skb_transport_offset(skb));
|
||||
ops = rcu_dereference(inet6_offloads[proto]);
|
||||
if (likely(ops && ops->callbacks.gso_segment))
|
||||
|
@ -45,6 +45,19 @@ static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buf
|
||||
return xfrm_output(sk, skb);
|
||||
}
|
||||
|
||||
static int xfrm6_noneed_fragment(struct sk_buff *skb)
|
||||
{
|
||||
struct frag_hdr *fh;
|
||||
u8 prevhdr = ipv6_hdr(skb)->nexthdr;
|
||||
|
||||
if (prevhdr != NEXTHDR_FRAGMENT)
|
||||
return 0;
|
||||
fh = (struct frag_hdr *)(skb->data + sizeof(struct ipv6hdr));
|
||||
if (fh->nexthdr == NEXTHDR_ESP || fh->nexthdr == NEXTHDR_AUTH)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct dst_entry *dst = skb_dst(skb);
|
||||
@ -73,6 +86,9 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
|
||||
xfrm6_local_rxpmtu(skb, mtu);
|
||||
kfree_skb(skb);
|
||||
return -EMSGSIZE;
|
||||
} else if (toobig && xfrm6_noneed_fragment(skb)) {
|
||||
skb->ignore_df = 1;
|
||||
goto skip_frag;
|
||||
} else if (!skb->ignore_df && toobig && skb->sk) {
|
||||
xfrm_local_error(skb, mtu);
|
||||
kfree_skb(skb);
|
||||
|
@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
|
||||
r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
|
||||
r->idiag_retrans = asoc->rtx_data_chunks;
|
||||
r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
|
||||
} else {
|
||||
r->idiag_timer = 0;
|
||||
r->idiag_retrans = 0;
|
||||
r->idiag_expires = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
|
||||
r = nlmsg_data(nlh);
|
||||
BUG_ON(!sk_fullsock(sk));
|
||||
|
||||
r->idiag_timer = 0;
|
||||
r->idiag_retrans = 0;
|
||||
r->idiag_expires = 0;
|
||||
if (asoc) {
|
||||
inet_diag_msg_sctpasoc_fill(r, sk, asoc);
|
||||
} else {
|
||||
inet_diag_msg_common_fill(r, sk);
|
||||
r->idiag_state = sk->sk_state;
|
||||
r->idiag_timer = 0;
|
||||
r->idiag_retrans = 0;
|
||||
}
|
||||
|
||||
if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
|
||||
|
@ -352,16 +352,18 @@ static int tipc_enable_bearer(struct net *net, const char *name,
|
||||
goto rejected;
|
||||
}
|
||||
|
||||
/* Create monitoring data before accepting activate messages */
|
||||
if (tipc_mon_create(net, bearer_id)) {
|
||||
bearer_disable(net, b);
|
||||
kfree_skb(skb);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
test_and_set_bit_lock(0, &b->up);
|
||||
rcu_assign_pointer(tn->bearer_list[bearer_id], b);
|
||||
if (skb)
|
||||
tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
|
||||
|
||||
if (tipc_mon_create(net, bearer_id)) {
|
||||
bearer_disable(net, b);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pr_info("Enabled bearer <%s>, priority %u\n", name, prio);
|
||||
|
||||
return res;
|
||||
|
@ -2286,6 +2286,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
||||
break;
|
||||
|
||||
case STATE_MSG:
|
||||
/* Validate Gap ACK blocks, drop if invalid */
|
||||
glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
|
||||
if (glen > dlen)
|
||||
break;
|
||||
|
||||
l->rcv_nxt_state = msg_seqno(hdr) + 1;
|
||||
|
||||
/* Update own tolerance if peer indicates a non-zero value */
|
||||
@ -2311,10 +2316,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
|
||||
break;
|
||||
}
|
||||
|
||||
/* Receive Gap ACK blocks from peer if any */
|
||||
glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
|
||||
if(glen > dlen)
|
||||
break;
|
||||
tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
|
||||
&l->mon_state, l->bearer_id);
|
||||
|
||||
|
@ -304,7 +304,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
|
||||
if (mtu < IPV6_MIN_MTU)
|
||||
mtu = IPV6_MIN_MTU;
|
||||
|
||||
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
|
||||
if (skb->len > 1280)
|
||||
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
|
||||
else
|
||||
goto xmit;
|
||||
} else {
|
||||
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
|
||||
goto xmit;
|
||||
|
@ -630,13 +630,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
|
||||
|
||||
xfrm_smark_init(attrs, &x->props.smark);
|
||||
|
||||
if (attrs[XFRMA_IF_ID]) {
|
||||
if (attrs[XFRMA_IF_ID])
|
||||
x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
|
||||
if (!x->if_id) {
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
|
||||
if (err)
|
||||
@ -1432,13 +1427,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
|
||||
|
||||
mark = xfrm_mark_get(attrs, &m);
|
||||
|
||||
if (attrs[XFRMA_IF_ID]) {
|
||||
if (attrs[XFRMA_IF_ID])
|
||||
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
|
||||
if (!if_id) {
|
||||
err = -EINVAL;
|
||||
goto out_noput;
|
||||
}
|
||||
}
|
||||
|
||||
if (p->info.seq) {
|
||||
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
|
||||
@ -1751,13 +1741,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
|
||||
|
||||
xfrm_mark_get(attrs, &xp->mark);
|
||||
|
||||
if (attrs[XFRMA_IF_ID]) {
|
||||
if (attrs[XFRMA_IF_ID])
|
||||
xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
|
||||
if (!xp->if_id) {
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
return xp;
|
||||
error:
|
||||
|
@ -374,6 +374,16 @@ run_cmd() {
|
||||
return $rc
|
||||
}
|
||||
|
||||
run_cmd_bg() {
|
||||
cmd="$*"
|
||||
|
||||
if [ "$VERBOSE" = "1" ]; then
|
||||
printf " COMMAND: %s &\n" "${cmd}"
|
||||
fi
|
||||
|
||||
$cmd 2>&1 &
|
||||
}
|
||||
|
||||
# Find the auto-generated name for this namespace
|
||||
nsname() {
|
||||
eval echo \$NS_$1
|
||||
@ -670,10 +680,10 @@ setup_nettest_xfrm() {
|
||||
[ ${1} -eq 6 ] && proto="-6" || proto=""
|
||||
port=${2}
|
||||
|
||||
run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
|
||||
run_cmd_bg "${ns_a}" nettest "${proto}" -q -D -s -x -p "${port}" -t 5
|
||||
nettest_pids="${nettest_pids} $!"
|
||||
|
||||
run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
|
||||
run_cmd_bg "${ns_b}" nettest "${proto}" -q -D -s -x -p "${port}" -t 5
|
||||
nettest_pids="${nettest_pids} $!"
|
||||
}
|
||||
|
||||
@ -865,7 +875,6 @@ setup_ovs_bridge() {
|
||||
setup() {
|
||||
[ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip
|
||||
|
||||
cleanup
|
||||
for arg do
|
||||
eval setup_${arg} || { echo " ${arg} not supported"; return 1; }
|
||||
done
|
||||
@ -876,7 +885,7 @@ trace() {
|
||||
|
||||
for arg do
|
||||
[ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
|
||||
${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
|
||||
${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
|
||||
tcpdump_pids="${tcpdump_pids} $!"
|
||||
ns_cmd=
|
||||
done
|
||||
@ -1836,6 +1845,10 @@ run_test() {
|
||||
|
||||
unset IFS
|
||||
|
||||
# Since cleanup() relies on variables modified by this subshell, it
|
||||
# has to run in this context.
|
||||
trap cleanup EXIT
|
||||
|
||||
if [ "$VERBOSE" = "1" ]; then
|
||||
printf "\n##########################################################################\n\n"
|
||||
fi
|
||||
|