mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-14 17:14:09 +00:00

Nothing major here. Over the last two weeks we gathered only around
two-thirds of our normal weekly fix count, but delaying sending these
until -rc7 seemed like a really bad idea. AFAIK we have no bugs under
investigation. One or two reverts for stuff for which we haven't gotten
a proper fix will likely come in the next PR.

Including fixes from wireless and netfilter.

Current release - fix to a fix:
 - netfilter: nft_set_hash: unaligned atomic read on struct nft_set_ext
 - eth: gve: trigger RX NAPI instead of TX NAPI in gve_xsk_wakeup

Previous releases - regressions:
 - net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets
 - mptcp:
    - fix recvmsg sleeping forever after bad recvbuffer adjust
    - fix TCP options overflow
    - prevent excessive coalescing on receive, fix throughput
 - net: fix memory leak in tcp_conn_request() if map insertion fails
 - wifi: cw1200: fix potential NULL dereference after conversion to
   GPIO descriptors
 - phy: micrel: dynamically control external clock of KSZ PHY, fix
   suspend behavior

Previous releases - always broken:
 - af_packet: fix VLAN handling with MSG_PEEK
 - net: restrict SO_REUSEPORT to inet sockets
 - netdev-genl: avoid empty messages in NAPI get
 - dsa: microchip: fix set_ageing_time function on KSZ9477 and LAN937X
 - eth: gve: XDP fixes around transmit, queue wakeup, etc.
 - eth: ti: icssg-prueth: fix firmware load sequence to prevent time
   jump which breaks timesync related operations

Misc:
 - netlink: specs: mptcp: add missing attr and improve documentation

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmd4FCgACgkQMUZtbf5S
IruB7hAAkppr4+4kywDq/PIqgkisjYzpNIQIrphebUeIHIcAizWbJEL9CiHXnCQD
w95+nVhzDmsWXdzE2AvwM/2YAByLP6CpgKEa+7rI3K5BQZ86lbzm+ftOOFYiF3eV
2v5yLfjgMVbzwqzuP3ivkNrtG95IGQFPobLl7tI9Wpgm3yD0+H8BCqtZNAwA4N6g
mEdZD1jMOzkdINoJBg35O70B2GDq/WSS1N8dgxtj1F7EPDuQdO1kijJmqFjYT3fw
30/NdjlfGtbFro6zp+8ZY6P9RG3CLNhYXu+nUqdOHJOJw65aXUxOPjWCRp3o6m12
HIIp22imDTvNTSCfn0wlbGJfC+v9HdIgB9gqApAaxNOS02bQgEy5tuxxXNTo2B1I
bqH7GJ+6a7axsQ6BnpcOcDhu2fnbtguj1W/zC/MlGQmZZj2g+UNCf4QqPir/XkcG
r43/iy2/L8cgZWYXzH1skfUMgbymI5uLytO+n+CLsb6P/N3WLa7wxHNV5Lni+Xng
qDyd0TwUccCr2KBfZqjgFeGbvQ7qub+eeQCYx9kUAaFhKUsQB/eZ5i9C7SsfutMa
xzwITqCkcvmdyU3gG3v/oABtQkxdTcRqWCfYDGcXm0zb1jzMf5ZGN4E/gABYM7p9
EewL1kCAt4ULmBc8Hqrj+2Erlzh+xsodvvfpgpJandMVXaJW44k=
=YKS4
-----END PGP SIGNATURE-----

Merge tag 'net-6.13-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from wireless and netfilter.

  Nothing major here. Over the last two weeks we gathered only around
  two-thirds of our normal weekly fix count, but delaying sending these
  until -rc7 seemed like a really bad idea. AFAIK we have no bugs under
  investigation. One or two reverts for stuff for which we haven't
  gotten a proper fix will likely come in the next PR.

  Current release - fix to a fix:
   - netfilter: nft_set_hash: unaligned atomic read on struct nft_set_ext
   - eth: gve: trigger RX NAPI instead of TX NAPI in gve_xsk_wakeup

  Previous releases - regressions:
   - net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets
   - mptcp:
      - fix recvmsg sleeping forever after bad recvbuffer adjust
      - fix TCP options overflow
      - prevent excessive coalescing on receive, fix throughput
   - net: fix memory leak in tcp_conn_request() if map insertion fails
   - wifi: cw1200: fix potential NULL dereference after conversion to
     GPIO descriptors
   - phy: micrel: dynamically control external clock of KSZ PHY, fix
     suspend behavior

  Previous releases - always broken:
   - af_packet: fix VLAN handling with MSG_PEEK
   - net: restrict SO_REUSEPORT to inet sockets
   - netdev-genl: avoid empty messages in NAPI get
   - dsa: microchip: fix set_ageing_time function on KSZ9477 and LAN937X
   - eth:
      - gve: XDP fixes around transmit, queue wakeup, etc.
      - ti: icssg-prueth: fix firmware load sequence to prevent time
        jump which breaks timesync related operations

  Misc:
   - netlink: specs: mptcp: add missing attr and improve documentation"

* tag 'net-6.13-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (50 commits)
  net: ti: icssg-prueth: Fix clearing of IEP_CMP_CFG registers during iep_init
  net: ti: icssg-prueth: Fix firmware load sequence.
  mptcp: prevent excessive coalescing on receive
  mptcp: don't always assume copied data in mptcp_cleanup_rbuf()
  mptcp: fix recvbuffer adjust on sleeping rcvmsg
  ila: serialize calls to nf_register_net_hooks()
  af_packet: fix vlan_get_protocol_dgram() vs MSG_PEEK
  af_packet: fix vlan_get_tci() vs MSG_PEEK
  net: wwan: iosm: Properly check for valid exec stage in ipc_mmio_init()
  net: restrict SO_REUSEPORT to inet sockets
  net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets
  net: sfc: Correct key_len for efx_tc_ct_zone_ht_params
  net: wwan: t7xx: Fix FSM command timeout issue
  sky2: Add device ID 11ab:4373 for Marvell 88E8075
  mptcp: fix TCP options overflow.
  net: mv643xx_eth: fix an OF node reference leak
  gve: trigger RX NAPI instead of TX NAPI in gve_xsk_wakeup
  eth: bcmsysport: fix call balance of priv->clk handling routines
  net: llc: reset skb->transport_header
  netlink: specs: mptcp: fix missing doc
  ...
This commit is contained in:
commit aba74e639f
@ -22,65 +22,67 @@ definitions:
|
||||
doc: unused event
|
||||
-
|
||||
name: created
|
||||
doc:
|
||||
token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
|
||||
doc: >-
|
||||
A new MPTCP connection has been created. It is the good time to
|
||||
allocate memory and send ADD_ADDR if needed. Depending on the
|
||||
traffic-patterns it can take a long time until the
|
||||
MPTCP_EVENT_ESTABLISHED is sent.
|
||||
Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
|
||||
dport, server-side.
|
||||
-
|
||||
name: established
|
||||
doc:
|
||||
token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
|
||||
doc: >-
|
||||
A MPTCP connection is established (can start new subflows).
|
||||
Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
|
||||
dport, server-side.
|
||||
-
|
||||
name: closed
|
||||
doc:
|
||||
token
|
||||
doc: >-
|
||||
A MPTCP connection has stopped.
|
||||
Attribute: token.
|
||||
-
|
||||
name: announced
|
||||
value: 6
|
||||
doc:
|
||||
token, rem_id, family, daddr4 | daddr6 [, dport]
|
||||
doc: >-
|
||||
A new address has been announced by the peer.
|
||||
Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
|
||||
-
|
||||
name: removed
|
||||
doc:
|
||||
token, rem_id
|
||||
doc: >-
|
||||
An address has been lost by the peer.
|
||||
Attributes: token, rem_id.
|
||||
-
|
||||
name: sub-established
|
||||
value: 10
|
||||
doc:
|
||||
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
|
||||
dport, backup, if_idx [, error]
|
||||
doc: >-
|
||||
A new subflow has been established. 'error' should not be set.
|
||||
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
|
||||
daddr6, sport, dport, backup, if_idx [, error].
|
||||
-
|
||||
name: sub-closed
|
||||
doc:
|
||||
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
|
||||
dport, backup, if_idx [, error]
|
||||
doc: >-
|
||||
A subflow has been closed. An error (copy of sk_err) could be set if an
|
||||
error has been detected for this subflow.
|
||||
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
|
||||
daddr6, sport, dport, backup, if_idx [, error].
|
||||
-
|
||||
name: sub-priority
|
||||
value: 13
|
||||
doc:
|
||||
token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
|
||||
dport, backup, if_idx [, error]
|
||||
doc: >-
|
||||
The priority of a subflow has changed. 'error' should not be set.
|
||||
Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
|
||||
daddr6, sport, dport, backup, if_idx [, error].
|
||||
-
|
||||
name: listener-created
|
||||
value: 15
|
||||
doc:
|
||||
family, sport, saddr4 | saddr6
|
||||
doc: >-
|
||||
A new PM listener is created.
|
||||
Attributes: family, sport, saddr4 | saddr6.
|
||||
-
|
||||
name: listener-closed
|
||||
doc:
|
||||
family, sport, saddr4 | saddr6
|
||||
doc: >-
|
||||
A PM listener is closed.
|
||||
Attributes: family, sport, saddr4 | saddr6.
|
||||
|
||||
attribute-sets:
|
||||
-
|
||||
@ -306,8 +308,8 @@ operations:
|
||||
attributes:
|
||||
- addr
|
||||
-
|
||||
name: flush-addrs
|
||||
doc: flush addresses
|
||||
name: flush-addrs
|
||||
doc: Flush addresses
|
||||
attribute-set: endpoint
|
||||
dont-validate: [ strict ]
|
||||
flags: [ uns-admin-perm ]
|
||||
@ -351,7 +353,7 @@ operations:
|
||||
- addr-remote
|
||||
-
|
||||
name: announce
|
||||
doc: announce new sf
|
||||
doc: Announce new address
|
||||
attribute-set: attr
|
||||
dont-validate: [ strict ]
|
||||
flags: [ uns-admin-perm ]
|
||||
@ -362,7 +364,7 @@ operations:
|
||||
- token
|
||||
-
|
||||
name: remove
|
||||
doc: announce removal
|
||||
doc: Announce removal
|
||||
attribute-set: attr
|
||||
dont-validate: [ strict ]
|
||||
flags: [ uns-admin-perm ]
|
||||
@ -373,7 +375,7 @@ operations:
|
||||
- loc-id
|
||||
-
|
||||
name: subflow-create
|
||||
doc: todo
|
||||
doc: Create subflow
|
||||
attribute-set: attr
|
||||
dont-validate: [ strict ]
|
||||
flags: [ uns-admin-perm ]
|
||||
@ -385,7 +387,7 @@ operations:
|
||||
- addr-remote
|
||||
-
|
||||
name: subflow-destroy
|
||||
doc: todo
|
||||
doc: Destroy subflow
|
||||
attribute-set: attr
|
||||
dont-validate: [ strict ]
|
||||
flags: [ uns-admin-perm ]
|
||||
|
@@ -3607,6 +3607,7 @@ F: drivers/phy/qualcomm/phy-ath79-usb.c
 
 ATHEROS ATH GENERIC UTILITIES
 M: Kalle Valo <kvalo@kernel.org>
+M: Jeff Johnson <jjohnson@kernel.org>
 L: linux-wireless@vger.kernel.org
 S: Supported
 F: drivers/net/wireless/ath/*
@@ -2,7 +2,7 @@
 /*
  * Microchip KSZ9477 switch driver main logic
  *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
  */
 
 #include <linux/kernel.h>
@@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
 int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
 {
 	u32 secs = msecs / 1000;
-	u8 value;
-	u8 data;
+	u8 data, mult, value;
+	u32 max_val;
 	int ret;
 
-	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+#define MAX_TIMER_VAL ((1 << 8) - 1)
 
-	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
-	if (ret < 0)
-		return ret;
+	/* The aging timer comprises a 3-bit multiplier and an 8-bit second
+	 * value. Either of them cannot be zero. The maximum timer is then
+	 * 7 * 255 = 1785 seconds.
+	 */
+	if (!secs)
+		secs = 1;
 
-	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+	/* Return error if too large. */
+	else if (secs > 7 * MAX_TIMER_VAL)
+		return -EINVAL;
 
 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
 	if (ret < 0)
 		return ret;
 
-	value &= ~SW_AGE_CNT_M;
-	value |= FIELD_PREP(SW_AGE_CNT_M, data);
+	/* Check whether there is need to update the multiplier. */
+	mult = FIELD_GET(SW_AGE_CNT_M, value);
+	max_val = MAX_TIMER_VAL;
+	if (mult > 0) {
+		/* Try to use the same multiplier already in the register as
+		 * the hardware default uses multiplier 4 and 75 seconds for
+		 * 300 seconds.
+		 */
+		max_val = DIV_ROUND_UP(secs, mult);
+		if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+			max_val = MAX_TIMER_VAL;
+	}
 
-	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+	data = DIV_ROUND_UP(secs, max_val);
+	if (mult != data) {
+		value &= ~SW_AGE_CNT_M;
+		value |= FIELD_PREP(SW_AGE_CNT_M, data);
+		ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+		if (ret < 0)
+			return ret;
+	}
+
+	value = DIV_ROUND_UP(secs, data);
+	return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
 }
 
 void ksz9477_port_queue_split(struct ksz_device *dev, int port)
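
The new KSZ9477 logic above is just integer arithmetic on a 3-bit multiplier and an 8-bit seconds register, and it can be checked in isolation. Below is a minimal userspace sketch of that split; split_ageing_time() and its parameters are illustrative, not driver API.

#include <stdio.h>

#define MAX_TIMER_VAL 255	/* 8-bit period register */
#define MAX_MULT      7		/* 3-bit multiplier, zero not allowed */

/* Mirror of the split done in ksz9477_set_ageing_time(): pick a multiplier
 * and a period so that mult * period covers the requested seconds,
 * preferring the multiplier already programmed in the register.
 */
static int split_ageing_time(unsigned int secs, unsigned int cur_mult,
			     unsigned int *mult, unsigned int *period)
{
	unsigned int max_val = MAX_TIMER_VAL;

	if (!secs)
		secs = 1;
	else if (secs > MAX_MULT * MAX_TIMER_VAL)
		return -1;	/* out of range, -EINVAL in the driver */

	/* Reuse the current multiplier if it still divides the time exactly. */
	if (cur_mult > 0) {
		max_val = (secs + cur_mult - 1) / cur_mult;	/* DIV_ROUND_UP */
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}

	*mult = (secs + max_val - 1) / max_val;
	*period = (secs + *mult - 1) / *mult;
	return 0;
}

int main(void)
{
	unsigned int mult, period;

	/* Hardware default: 300 s is programmed as multiplier 4, period 75. */
	if (!split_ageing_time(300, 4, &mult, &period))
		printf("300 s -> mult %u, period %u\n", mult, period);
	return 0;
}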
@ -2,7 +2,7 @@
|
||||
/*
|
||||
* Microchip KSZ9477 register definitions
|
||||
*
|
||||
* Copyright (C) 2017-2018 Microchip Technology Inc.
|
||||
* Copyright (C) 2017-2024 Microchip Technology Inc.
|
||||
*/
|
||||
|
||||
#ifndef __KSZ9477_REGS_H
|
||||
@ -165,8 +165,6 @@
|
||||
#define SW_VLAN_ENABLE BIT(7)
|
||||
#define SW_DROP_INVALID_VID BIT(6)
|
||||
#define SW_AGE_CNT_M GENMASK(5, 3)
|
||||
#define SW_AGE_CNT_S 3
|
||||
#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
|
||||
#define SW_RESV_MCAST_ENABLE BIT(2)
|
||||
#define SW_HASH_OPTION_M 0x03
|
||||
#define SW_HASH_OPTION_CRC 1
|
||||
|
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Microchip LAN937X switch driver main logic
|
||||
* Copyright (C) 2019-2022 Microchip Technology Inc.
|
||||
* Copyright (C) 2019-2024 Microchip Technology Inc.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
@ -461,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
|
||||
|
||||
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
|
||||
{
|
||||
u32 secs = msecs / 1000;
|
||||
u32 value;
|
||||
u8 data, mult, value8;
|
||||
bool in_msec = false;
|
||||
u32 max_val, value;
|
||||
u32 secs = msecs;
|
||||
int ret;
|
||||
|
||||
#define MAX_TIMER_VAL ((1 << 20) - 1)
|
||||
|
||||
/* The aging timer comprises a 3-bit multiplier and a 20-bit second
|
||||
* value. Either of them cannot be zero. The maximum timer is then
|
||||
* 7 * 1048575 = 7340025 seconds. As this value is too large for
|
||||
* practical use it can be interpreted as microseconds, making the
|
||||
* maximum timer 7340 seconds with finer control. This allows for
|
||||
* maximum 122 minutes compared to 29 minutes in KSZ9477 switch.
|
||||
*/
|
||||
if (msecs % 1000)
|
||||
in_msec = true;
|
||||
else
|
||||
secs /= 1000;
|
||||
if (!secs)
|
||||
secs = 1;
|
||||
|
||||
/* Return error if too large. */
|
||||
else if (secs > 7 * MAX_TIMER_VAL)
|
||||
return -EINVAL;
|
||||
|
||||
/* Configure how to interpret the number value. */
|
||||
ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
|
||||
in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* Check whether there is need to update the multiplier. */
|
||||
mult = FIELD_GET(SW_AGE_CNT_M, value8);
|
||||
max_val = MAX_TIMER_VAL;
|
||||
if (mult > 0) {
|
||||
/* Try to use the same multiplier already in the register as
|
||||
* the hardware default uses multiplier 4 and 75 seconds for
|
||||
* 300 seconds.
|
||||
*/
|
||||
max_val = DIV_ROUND_UP(secs, mult);
|
||||
if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
|
||||
max_val = MAX_TIMER_VAL;
|
||||
}
|
||||
|
||||
data = DIV_ROUND_UP(secs, max_val);
|
||||
if (mult != data) {
|
||||
value8 &= ~SW_AGE_CNT_M;
|
||||
value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
|
||||
ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
secs = DIV_ROUND_UP(secs, data);
|
||||
|
||||
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
|
||||
|
||||
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Microchip LAN937X switch register definitions
|
||||
* Copyright (C) 2019-2021 Microchip Technology Inc.
|
||||
* Copyright (C) 2019-2024 Microchip Technology Inc.
|
||||
*/
|
||||
#ifndef __LAN937X_REG_H
|
||||
#define __LAN937X_REG_H
|
||||
@ -56,8 +56,7 @@
|
||||
|
||||
#define SW_VLAN_ENABLE BIT(7)
|
||||
#define SW_DROP_INVALID_VID BIT(6)
|
||||
#define SW_AGE_CNT_M 0x7
|
||||
#define SW_AGE_CNT_S 3
|
||||
#define SW_AGE_CNT_M GENMASK(5, 3)
|
||||
#define SW_RESV_MCAST_ENABLE BIT(2)
|
||||
|
||||
#define REG_SW_LUE_CTRL_1 0x0311
|
||||
@ -70,6 +69,10 @@
|
||||
#define SW_FAST_AGING BIT(1)
|
||||
#define SW_LINK_AUTO_AGING BIT(0)
|
||||
|
||||
#define REG_SW_LUE_CTRL_2 0x0312
|
||||
|
||||
#define SW_AGE_CNT_IN_MICROSEC BIT(7)
|
||||
|
||||
#define REG_SW_AGE_PERIOD__1 0x0313
|
||||
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
|
||||
|
||||
|
@@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
 	unsigned int i;
 	int ret;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		netdev_err(dev, "could not enable priv clock\n");
+		return ret;
+	}
 
 	/* Reset UniMAC */
 	umac_reset(priv);
@@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 		goto err_deregister_notifier;
 	}
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "could not enable priv clock\n");
+		goto err_deregister_netdev;
+	}
 
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
 	dev_info(&pdev->dev,
@@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_deregister_netdev:
+	unregister_netdev(dev);
 err_deregister_notifier:
 	unregister_netdevice_notifier(&priv->netdev_notifier);
 err_deregister_fixed_link:
@@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 	if (!netif_running(dev))
 		return 0;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		netdev_err(dev, "could not enable priv clock\n");
+		return ret;
+	}
 
 	if (priv->wolopts)
 		clk_disable_unprepare(priv->wol_clk);
 
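
All three hunks above make the same change: the return value of clk_prepare_enable() is checked instead of being ignored, and the probe path gains an unwind label for it. A minimal sketch of that shape, with the function and message text illustrative rather than the driver's exact code:

#include <linux/clk.h>
#include <linux/netdevice.h>

static int example_open(struct net_device *dev, struct clk *clk)
{
	int ret;

	/* Treat the clock enable as fallible so a failure is reported and
	 * the open is aborted instead of continuing with dead hardware.
	 */
	ret = clk_prepare_enable(clk);
	if (ret) {
		netdev_err(dev, "could not enable clock\n");
		return ret;
	}

	/* ... bring the rest of the hardware up ... */

	return 0;
}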
@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
|
||||
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
|
||||
bool gve_tx_poll(struct gve_notify_block *block, int budget);
|
||||
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
|
||||
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
|
||||
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
|
||||
struct gve_tx_alloc_rings_cfg *cfg);
|
||||
void gve_tx_free_rings_gqi(struct gve_priv *priv,
|
||||
|
@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
if (block->rx) {
|
||||
work_done = gve_rx_poll(block, budget);
|
||||
|
||||
/* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
|
||||
* TX and RX work done.
|
||||
*/
|
||||
if (priv->xdp_prog)
|
||||
work_done = max_t(int, work_done,
|
||||
gve_xsk_tx_poll(block, budget));
|
||||
|
||||
reschedule |= work_done == budget;
|
||||
}
|
||||
|
||||
@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
|
||||
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
|
||||
struct gve_tx_alloc_rings_cfg *cfg)
|
||||
{
|
||||
int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
|
||||
|
||||
cfg->qcfg = &priv->tx_cfg;
|
||||
cfg->raw_addressing = !gve_is_qpl(priv);
|
||||
cfg->ring_size = priv->tx_desc_cnt;
|
||||
cfg->start_idx = 0;
|
||||
cfg->num_rings = gve_num_tx_queues(priv);
|
||||
cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
|
||||
cfg->tx = priv->tx;
|
||||
}
|
||||
|
||||
@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* If XDP prog is not installed, return */
|
||||
if (!priv->xdp_prog)
|
||||
/* If XDP prog is not installed or interface is down, return. */
|
||||
if (!priv->xdp_prog || !netif_running(dev))
|
||||
return 0;
|
||||
|
||||
rx = &priv->rx[qid];
|
||||
@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
|
||||
if (qid >= priv->rx_cfg.num_queues)
|
||||
return -EINVAL;
|
||||
|
||||
/* If XDP prog is not installed, unmap DMA and return */
|
||||
if (!priv->xdp_prog)
|
||||
/* If XDP prog is not installed or interface is down, unmap DMA and
|
||||
* return.
|
||||
*/
|
||||
if (!priv->xdp_prog || !netif_running(dev))
|
||||
goto done;
|
||||
|
||||
tx_qid = gve_xdp_tx_queue_id(priv, qid);
|
||||
if (!netif_running(dev)) {
|
||||
priv->rx[qid].xsk_pool = NULL;
|
||||
xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
|
||||
priv->tx[tx_qid].xsk_pool = NULL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
|
||||
napi_disable(napi_rx); /* make sure current rx poll is done */
|
||||
|
||||
tx_qid = gve_xdp_tx_queue_id(priv, qid);
|
||||
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
|
||||
napi_disable(napi_tx); /* make sure current tx poll is done */
|
||||
|
||||
@ -1709,24 +1714,20 @@ done:
|
||||
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
|
||||
{
|
||||
struct gve_priv *priv = netdev_priv(dev);
|
||||
int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
|
||||
struct napi_struct *napi;
|
||||
|
||||
if (!gve_get_napi_enabled(priv))
|
||||
return -ENETDOWN;
|
||||
|
||||
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
|
||||
return -EINVAL;
|
||||
|
||||
if (flags & XDP_WAKEUP_TX) {
|
||||
struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
|
||||
struct napi_struct *napi =
|
||||
&priv->ntfy_blocks[tx->ntfy_id].napi;
|
||||
|
||||
if (!napi_if_scheduled_mark_missed(napi)) {
|
||||
/* Call local_bh_enable to trigger SoftIRQ processing */
|
||||
local_bh_disable();
|
||||
napi_schedule(napi);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
tx->xdp_xsk_wakeup++;
|
||||
napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
|
||||
if (!napi_if_scheduled_mark_missed(napi)) {
|
||||
/* Call local_bh_enable to trigger SoftIRQ processing */
|
||||
local_bh_disable();
|
||||
napi_schedule(napi);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
return 0;
|
||||
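
The wakeup hunk above reduces to one pattern: instead of kicking the dedicated XDP TX NAPI, gve_xsk_wakeup() now schedules the RX NAPI (which also drains XSK TX during gve_napi_poll()) unless it is already running. A schematic sketch, with the ring and notify-block lookup elided:

#include <linux/netdevice.h>

static void example_xsk_wakeup(struct napi_struct *rx_napi)
{
	/* If the NAPI is already scheduled, just mark it missed so it
	 * re-polls; otherwise schedule it and let the BH re-enable kick
	 * off softirq processing.
	 */
	if (!napi_if_scheduled_mark_missed(rx_napi)) {
		local_bh_disable();
		napi_schedule(rx_napi);
		local_bh_enable();
	}
}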
@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
|
||||
{
|
||||
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
|
||||
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
|
||||
int num_xdp_queues;
|
||||
int err;
|
||||
|
||||
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
|
||||
@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
|
||||
rx_alloc_cfg.qcfg = &new_rx_config;
|
||||
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
|
||||
|
||||
/* Add dedicated XDP TX queues if enabled. */
|
||||
num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
|
||||
tx_alloc_cfg.num_rings += num_xdp_queues;
|
||||
|
||||
if (netif_running(priv->dev)) {
|
||||
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
|
||||
return err;
|
||||
@ -1899,6 +1905,9 @@ static void gve_turndown(struct gve_priv *priv)
|
||||
|
||||
gve_clear_napi_enabled(priv);
|
||||
gve_clear_report_stats(priv);
|
||||
|
||||
/* Make sure that all traffic is finished processing. */
|
||||
synchronize_net();
|
||||
}
|
||||
|
||||
static void gve_turnup(struct gve_priv *priv)
|
||||
|
@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
|
||||
return;
|
||||
|
||||
gve_remove_napi(priv, ntfy_idx);
|
||||
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
|
||||
if (tx->q_num < priv->tx_cfg.num_queues)
|
||||
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
|
||||
else
|
||||
gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
|
||||
netdev_tx_reset_queue(tx->netdev_txq);
|
||||
gve_tx_remove_from_block(priv, idx);
|
||||
}
|
||||
@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
|
||||
struct gve_tx_ring *tx;
|
||||
int i, err = 0, qid;
|
||||
|
||||
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
|
||||
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
|
||||
return -EINVAL;
|
||||
|
||||
if (!gve_get_napi_enabled(priv))
|
||||
return -ENETDOWN;
|
||||
|
||||
qid = gve_xdp_tx_queue_id(priv,
|
||||
smp_processor_id() % priv->num_xdp_queues);
|
||||
|
||||
@ -975,33 +981,41 @@ out:
|
||||
return sent;
|
||||
}
|
||||
|
||||
int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
|
||||
{
|
||||
struct gve_rx_ring *rx = rx_block->rx;
|
||||
struct gve_priv *priv = rx->gve;
|
||||
struct gve_tx_ring *tx;
|
||||
int sent = 0;
|
||||
|
||||
tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
|
||||
if (tx->xsk_pool) {
|
||||
sent = gve_xsk_tx(priv, tx, budget);
|
||||
|
||||
u64_stats_update_begin(&tx->statss);
|
||||
tx->xdp_xsk_sent += sent;
|
||||
u64_stats_update_end(&tx->statss);
|
||||
if (xsk_uses_need_wakeup(tx->xsk_pool))
|
||||
xsk_set_tx_need_wakeup(tx->xsk_pool);
|
||||
}
|
||||
|
||||
return sent;
|
||||
}
|
||||
|
||||
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
|
||||
{
|
||||
struct gve_priv *priv = block->priv;
|
||||
struct gve_tx_ring *tx = block->tx;
|
||||
u32 nic_done;
|
||||
bool repoll;
|
||||
u32 to_do;
|
||||
|
||||
/* Find out how much work there is to be done */
|
||||
nic_done = gve_tx_load_event_counter(priv, tx);
|
||||
to_do = min_t(u32, (nic_done - tx->done), budget);
|
||||
gve_clean_xdp_done(priv, tx, to_do);
|
||||
repoll = nic_done != tx->done;
|
||||
|
||||
if (tx->xsk_pool) {
|
||||
int sent = gve_xsk_tx(priv, tx, budget);
|
||||
|
||||
u64_stats_update_begin(&tx->statss);
|
||||
tx->xdp_xsk_sent += sent;
|
||||
u64_stats_update_end(&tx->statss);
|
||||
repoll |= (sent == budget);
|
||||
if (xsk_uses_need_wakeup(tx->xsk_pool))
|
||||
xsk_set_tx_need_wakeup(tx->xsk_pool);
|
||||
}
|
||||
|
||||
/* If we still have work we want to repoll */
|
||||
return repoll;
|
||||
return nic_done != tx->done;
|
||||
}
|
||||
|
||||
bool gve_tx_poll(struct gve_notify_block *block, int budget)
|
||||
|
@@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3];
 
 static void mv643xx_eth_shared_of_remove(void)
 {
+	struct mv643xx_eth_platform_data *pd;
 	int n;
 
 	for (n = 0; n < 3; n++) {
+		if (!port_platdev[n])
+			continue;
+		pd = dev_get_platdata(&port_platdev[n]->dev);
+		if (pd)
+			of_node_put(pd->phy_node);
 		platform_device_del(port_platdev[n]);
 		port_platdev[n] = NULL;
 	}
@@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 	}
 
 	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
-	if (!ppdev)
-		return -ENOMEM;
+	if (!ppdev) {
+		ret = -ENOMEM;
+		goto put_err;
+	}
 	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	ppdev->dev.of_node = pnp;
 
@@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
 port_err:
 	platform_device_put(ppdev);
+put_err:
+	of_node_put(ppd.phy_node);
 	return ret;
 }
 
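
The leak fix above follows the usual goto-unwind rule: the OF node reference taken while building the port has to be dropped on every failure path, including the early allocation failure, and in the shared remove path. A hedged sketch of that ordering (names are illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_add_port(struct device_node *phy_node)
{
	struct platform_device *ppdev;
	int ret;

	ppdev = platform_device_alloc("example-port", 0);
	if (!ppdev) {
		ret = -ENOMEM;
		goto put_err;
	}

	ret = platform_device_add(ppdev);
	if (ret)
		goto port_err;

	/* On success the phy_node reference stays with the platform data
	 * and is released later by the remove path, as in the first hunk.
	 */
	return 0;

port_err:
	platform_device_put(ppdev);
put_err:
	of_node_put(phy_node);
	return ret;
}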
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
|
||||
{
|
||||
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
|
||||
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
|
||||
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
|
||||
struct mlx5_macsec_rule_attrs rule_attrs;
|
||||
union mlx5_macsec_rule *macsec_rule;
|
||||
|
||||
if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
|
||||
return 0;
|
||||
|
||||
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
|
||||
rule_attrs.sci = sa->sci;
|
||||
rule_attrs.assoc_num = sa->assoc_num;
|
||||
|
@ -6542,8 +6542,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
|
||||
|
||||
mlx5_core_uplink_netdev_set(mdev, NULL);
|
||||
mlx5e_dcbnl_delete_app(priv);
|
||||
unregister_netdev(priv->netdev);
|
||||
_mlx5e_suspend(adev, false);
|
||||
/* When unload driver, the netdev is in registered state
|
||||
* if it's from legacy mode. If from switchdev mode, it
|
||||
* is already unregistered before changing to NIC profile.
|
||||
*/
|
||||
if (priv->netdev->reg_state == NETREG_REGISTERED) {
|
||||
unregister_netdev(priv->netdev);
|
||||
_mlx5e_suspend(adev, false);
|
||||
} else {
|
||||
struct mlx5_core_dev *pos;
|
||||
int i;
|
||||
|
||||
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
|
||||
mlx5_sd_for_each_dev(i, mdev, pos)
|
||||
mlx5e_destroy_mdev_resources(pos);
|
||||
else
|
||||
_mlx5e_suspend(adev, true);
|
||||
}
|
||||
/* Avoid cleanup if profile rollback failed. */
|
||||
if (priv->profile)
|
||||
priv->profile->cleanup(priv);
|
||||
|
@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
|
||||
|
||||
priv = netdev_priv(netdev);
|
||||
|
||||
/* This bit is set when using devlink to change eswitch mode from
|
||||
* switchdev to legacy. As need to keep uplink netdev ifindex, we
|
||||
* detach uplink representor profile and attach NIC profile only.
|
||||
* The netdev will be unregistered later when unload NIC auxiliary
|
||||
* driver for this case.
|
||||
* We explicitly block devlink eswitch mode change if any IPSec rules
|
||||
* offloaded, but can't block other cases, such as driver unload
|
||||
* and devlink reload. We have to unregister netdev before profile
|
||||
* change for those cases. This is to avoid resource leak because
|
||||
* the offloaded rules don't have the chance to be unoffloaded before
|
||||
* cleanup which is triggered by detach uplink representor profile.
|
||||
*/
|
||||
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
|
||||
unregister_netdev(netdev);
|
||||
|
||||
mlx5e_netdev_attach_nic_profile(priv);
|
||||
}
|
||||
|
||||
|
@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
|
||||
unsigned long i;
|
||||
int err;
|
||||
|
||||
xa_for_each(&esw->offloads.vport_reps, i, rep) {
|
||||
rpriv = rep->rep_data[REP_ETH].priv;
|
||||
if (!rpriv || !rpriv->netdev)
|
||||
mlx5_esw_for_each_rep(esw, i, rep) {
|
||||
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
|
||||
continue;
|
||||
|
||||
rpriv = rep->rep_data[REP_ETH].priv;
|
||||
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
|
||||
rhashtable_walk_start(&iter);
|
||||
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
|
||||
|
@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
|
||||
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
|
||||
(last) - 1)
|
||||
|
||||
#define mlx5_esw_for_each_rep(esw, i, rep) \
|
||||
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
|
||||
|
||||
struct mlx5_eswitch *__must_check
|
||||
mlx5_devlink_eswitch_get(struct devlink *devlink);
|
||||
|
||||
|
@ -53,9 +53,6 @@
|
||||
#include "lag/lag.h"
|
||||
#include "en/tc/post_meter.h"
|
||||
|
||||
#define mlx5_esw_for_each_rep(esw, i, rep) \
|
||||
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
|
||||
|
||||
/* There are two match-all miss flows, one for unicast dst mac and
|
||||
* one for multicast.
|
||||
*/
|
||||
@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
|
||||
esw->eswitch_operation_in_progress = true;
|
||||
up_write(&esw->mode_lock);
|
||||
|
||||
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
|
||||
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
|
||||
mlx5_eswitch_disable_locked(esw);
|
||||
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
|
||||
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
|
||||
|
@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
|
||||
int inlen, err, eqn;
|
||||
void *cqc, *in;
|
||||
__be64 *pas;
|
||||
int vector;
|
||||
u32 i;
|
||||
|
||||
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
|
||||
@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
|
||||
if (!in)
|
||||
goto err_cqwq;
|
||||
|
||||
vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
|
||||
err = mlx5_comp_eqn_get(mdev, vector, &eqn);
|
||||
err = mlx5_comp_eqn_get(mdev, 0, &eqn);
|
||||
if (err) {
|
||||
kvfree(in);
|
||||
goto err_cqwq;
|
||||
|
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
 
 	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
 	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
-			    0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
-			    0);
+			    0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
 
 	rt = ip_route_output_key(tun->net, &fl4);
 	if (IS_ERR(rt))
@@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
 	u32 i, j;
 
 	*(data++) = start;
-	*(data++) = end - 1;
+	*(data++) = end;
 
 	/* FBNIC_RPC_TCAM_ACT */
 	for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
 			     void *cb_priv);
 
 static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
-	.key_len	= offsetof(struct efx_tc_ct_zone, linkage),
+	.key_len	= sizeof_field(struct efx_tc_ct_zone, zone),
 	.key_offset	= 0,
 	.head_offset	= offsetof(struct efx_tc_ct_zone, linkage),
 };
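
For context, key_len has to describe only the key itself; using offsetof(..., linkage) made everything in front of the linkage member, including padding and unrelated state, part of the hashed and compared key. A schematic version of the corrected parameters with an illustrative struct layout (not the sfc one):

#include <linux/rhashtable.h>
#include <linux/stddef.h>

struct ct_zone_entry {
	u16 zone;			/* lookup key */
	/* ... other per-zone state ... */
	struct rhash_head linkage;	/* hash table linkage */
};

static const struct rhashtable_params ct_zone_ht_params = {
	.key_len	= sizeof_field(struct ct_zone_entry, zone),
	.key_offset	= offsetof(struct ct_zone_entry, zone),
	.head_offset	= offsetof(struct ct_zone_entry, linkage),
};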
@ -405,22 +405,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/**
|
||||
* stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
|
||||
* @pdev: platform_device structure
|
||||
* @plat: driver data platform structure
|
||||
*
|
||||
* Release resources claimed by stmmac_probe_config_dt().
|
||||
*/
|
||||
static void stmmac_remove_config_dt(struct platform_device *pdev,
|
||||
struct plat_stmmacenet_data *plat)
|
||||
{
|
||||
clk_disable_unprepare(plat->stmmac_clk);
|
||||
clk_disable_unprepare(plat->pclk);
|
||||
of_node_put(plat->phy_node);
|
||||
of_node_put(plat->mdio_node);
|
||||
}
|
||||
|
||||
/**
|
||||
* stmmac_probe_config_dt - parse device-tree driver parameters
|
||||
* @pdev: platform_device structure
|
||||
@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
|
||||
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
|
||||
|
||||
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
if (rc) {
|
||||
ret = ERR_PTR(rc);
|
||||
goto error_put_phy;
|
||||
}
|
||||
|
||||
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
|
||||
|
||||
@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
|
||||
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
|
||||
GFP_KERNEL);
|
||||
if (!dma_cfg) {
|
||||
stmmac_remove_config_dt(pdev, plat);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
goto error_put_mdio;
|
||||
}
|
||||
plat->dma_cfg = dma_cfg;
|
||||
|
||||
@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
|
||||
|
||||
rc = stmmac_mtl_setup(pdev, plat);
|
||||
if (rc) {
|
||||
stmmac_remove_config_dt(pdev, plat);
|
||||
return ERR_PTR(rc);
|
||||
ret = ERR_PTR(rc);
|
||||
goto error_put_mdio;
|
||||
}
|
||||
|
||||
/* clock setup */
|
||||
@ -663,6 +649,10 @@ error_hw_init:
|
||||
clk_disable_unprepare(plat->pclk);
|
||||
error_pclk_get:
|
||||
clk_disable_unprepare(plat->stmmac_clk);
|
||||
error_put_mdio:
|
||||
of_node_put(plat->mdio_node);
|
||||
error_put_phy:
|
||||
of_node_put(plat->phy_node);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data)
|
||||
{
|
||||
struct plat_stmmacenet_data *plat = data;
|
||||
|
||||
/* Platform data argument is unused */
|
||||
stmmac_remove_config_dt(NULL, plat);
|
||||
clk_disable_unprepare(plat->stmmac_clk);
|
||||
clk_disable_unprepare(plat->pclk);
|
||||
of_node_put(plat->mdio_node);
|
||||
of_node_put(plat->phy_node);
|
||||
}
|
||||
|
||||
/**
|
||||
* devm_stmmac_probe_config_dt
|
||||
* @pdev: platform_device structure
|
||||
* @mac: MAC address to use
|
||||
* Description: Devres variant of stmmac_probe_config_dt(). Does not require
|
||||
* the user to call stmmac_remove_config_dt() at driver detach.
|
||||
* Description: Devres variant of stmmac_probe_config_dt().
|
||||
*/
|
||||
struct plat_stmmacenet_data *
|
||||
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
|
||||
|
@@ -3551,7 +3551,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 	init_completion(&common->tdown_complete);
 	common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
 	common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
-	common->pf_p0_rx_ptype_rrobin = false;
+	common->pf_p0_rx_ptype_rrobin = true;
 	common->default_vlan = 1;
 
 	common->ports = devm_kcalloc(dev, common->port_num,
@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
 	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
 		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
 				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+				   IEP_CMP_CFG_CMP_EN(cmp), 0);
 	}
 
 	/* enable reset counter on CMP0 event */
@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
 	}
 	icss_iep_disable(iep);
 
+	if (iep->pps_enabled)
+		icss_iep_pps_enable(iep, false);
+	else if (iep->perout_enabled)
+		icss_iep_perout_enable(iep, NULL, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(icss_iep_exit);
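
The point of the first hunk is that acknowledging CMP status is not enough; the compare-enable bits themselves must be cleared so a configuration left over from a previous owner cannot fire after the IEP is reused. A schematic regmap sketch; the register offsets and bit layout here are assumed, not the real IEP map:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_CMP_STAT_REG	0x74	/* assumed offsets */
#define EXAMPLE_CMP_CFG_REG	0x70
#define EXAMPLE_CMP_STATUS(n)	BIT(n)
#define EXAMPLE_CMP_CFG_EN(n)	BIT((n) + 1)

static void example_clear_cmp(struct regmap *map, int cmp)
{
	/* Ack any stale compare event... */
	regmap_update_bits(map, EXAMPLE_CMP_STAT_REG,
			   EXAMPLE_CMP_STATUS(cmp), EXAMPLE_CMP_STATUS(cmp));
	/* ...and disable the comparator so it cannot fire again. */
	regmap_update_bits(map, EXAMPLE_CMP_CFG_REG,
			   EXAMPLE_CMP_CFG_EN(cmp), 0);
}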
@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(prueth_rx_irq);
|
||||
|
||||
void prueth_emac_stop(struct prueth_emac *emac)
|
||||
{
|
||||
struct prueth *prueth = emac->prueth;
|
||||
int slice;
|
||||
|
||||
switch (emac->port_id) {
|
||||
case PRUETH_PORT_MII0:
|
||||
slice = ICSS_SLICE0;
|
||||
break;
|
||||
case PRUETH_PORT_MII1:
|
||||
slice = ICSS_SLICE1;
|
||||
break;
|
||||
default:
|
||||
netdev_err(emac->ndev, "invalid port\n");
|
||||
return;
|
||||
}
|
||||
|
||||
emac->fw_running = 0;
|
||||
if (!emac->is_sr1)
|
||||
rproc_shutdown(prueth->txpru[slice]);
|
||||
rproc_shutdown(prueth->rtu[slice]);
|
||||
rproc_shutdown(prueth->pru[slice]);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(prueth_emac_stop);
|
||||
|
||||
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
|
||||
{
|
||||
int i;
|
||||
|
@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void icssg_init_emac_mode(struct prueth *prueth)
|
||||
void icssg_init_emac_mode(struct prueth *prueth)
|
||||
{
|
||||
/* When the device is configured as a bridge and it is being brought
|
||||
* back to the emac mode, the host mac address has to be set as 0.
|
||||
@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
|
||||
int i;
|
||||
u8 mac[ETH_ALEN] = { 0 };
|
||||
|
||||
if (prueth->emacs_initialized)
|
||||
return;
|
||||
|
||||
/* Set VLAN TABLE address base */
|
||||
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
|
||||
addr << SMEM_VLAN_OFFSET);
|
||||
@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
|
||||
/* Clear host MAC address */
|
||||
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
|
||||
|
||||
static void icssg_init_fw_offload_mode(struct prueth *prueth)
|
||||
void icssg_init_fw_offload_mode(struct prueth *prueth)
|
||||
{
|
||||
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
|
||||
int i;
|
||||
|
||||
if (prueth->emacs_initialized)
|
||||
return;
|
||||
|
||||
/* Set VLAN TABLE address base */
|
||||
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
|
||||
addr << SMEM_VLAN_OFFSET);
|
||||
@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
|
||||
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
|
||||
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
|
||||
|
||||
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
|
||||
{
|
||||
@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
|
||||
struct icssg_flow_cfg __iomem *flow_cfg;
|
||||
int ret;
|
||||
|
||||
if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
|
||||
icssg_init_fw_offload_mode(prueth);
|
||||
else
|
||||
icssg_init_emac_mode(prueth);
|
||||
|
||||
memset_io(config, 0, TAS_GATE_MASK_LIST0);
|
||||
icssg_miig_queues_init(prueth, slice);
|
||||
|
||||
@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
|
||||
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(icssg_set_pvid);
|
||||
|
||||
int emac_fdb_flow_id_updated(struct prueth_emac *emac)
|
||||
{
|
||||
struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
|
||||
int slice = prueth_emac_slice(emac);
|
||||
struct mgmt_cmd fdb_cmd = { 0 };
|
||||
int ret;
|
||||
|
||||
fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
|
||||
fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
|
||||
fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
|
||||
fdb_cmd.param = 0;
|
||||
|
||||
fdb_cmd.param |= (slice << 4);
|
||||
fdb_cmd.cmd_args[0] = 0;
|
||||
|
||||
ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
|
||||
return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
|
||||
|
@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
|
||||
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
|
||||
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
|
||||
#define ICSSG_FW_MGMT_PKT 0x80000000
|
||||
#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
|
||||
|
||||
struct icssg_r30_cmd {
|
||||
u32 cmd[4];
|
||||
|
@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
|
||||
}
|
||||
};
|
||||
|
||||
static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
|
||||
static int prueth_start(struct rproc *rproc, const char *fw_name)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = rproc_set_firmware(rproc, fw_name);
|
||||
if (ret)
|
||||
return ret;
|
||||
return rproc_boot(rproc);
|
||||
}
|
||||
|
||||
static void prueth_shutdown(struct rproc *rproc)
|
||||
{
|
||||
rproc_shutdown(rproc);
|
||||
}
|
||||
|
||||
static int prueth_emac_start(struct prueth *prueth)
|
||||
{
|
||||
struct icssg_firmwares *firmwares;
|
||||
struct device *dev = prueth->dev;
|
||||
int slice, ret;
|
||||
int ret, slice;
|
||||
|
||||
if (prueth->is_switch_mode)
|
||||
firmwares = icssg_switch_firmwares;
|
||||
@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
|
||||
else
|
||||
firmwares = icssg_emac_firmwares;
|
||||
|
||||
slice = prueth_emac_slice(emac);
|
||||
if (slice < 0) {
|
||||
netdev_err(emac->ndev, "invalid port\n");
|
||||
return -EINVAL;
|
||||
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
|
||||
ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
|
||||
goto unwind_slices;
|
||||
}
|
||||
|
||||
ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
|
||||
rproc_shutdown(prueth->pru[slice]);
|
||||
goto unwind_slices;
|
||||
}
|
||||
|
||||
ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
|
||||
rproc_shutdown(prueth->rtu[slice]);
|
||||
rproc_shutdown(prueth->pru[slice]);
|
||||
goto unwind_slices;
|
||||
}
|
||||
}
|
||||
|
||||
ret = icssg_config(prueth, emac, slice);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
|
||||
ret = rproc_boot(prueth->pru[slice]);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
|
||||
ret = rproc_boot(prueth->rtu[slice]);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
|
||||
goto halt_pru;
|
||||
}
|
||||
|
||||
ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
|
||||
ret = rproc_boot(prueth->txpru[slice]);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
|
||||
goto halt_rtu;
|
||||
}
|
||||
|
||||
emac->fw_running = 1;
|
||||
return 0;
|
||||
|
||||
halt_rtu:
|
||||
rproc_shutdown(prueth->rtu[slice]);
|
||||
|
||||
halt_pru:
|
||||
rproc_shutdown(prueth->pru[slice]);
|
||||
unwind_slices:
|
||||
while (--slice >= 0) {
|
||||
prueth_shutdown(prueth->txpru[slice]);
|
||||
prueth_shutdown(prueth->rtu[slice]);
|
||||
prueth_shutdown(prueth->pru[slice]);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void prueth_emac_stop(struct prueth *prueth)
|
||||
{
|
||||
int slice;
|
||||
|
||||
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
|
||||
prueth_shutdown(prueth->txpru[slice]);
|
||||
prueth_shutdown(prueth->rtu[slice]);
|
||||
prueth_shutdown(prueth->pru[slice]);
|
||||
}
|
||||
}
|
||||
|
||||
static int prueth_emac_common_start(struct prueth *prueth)
|
||||
{
|
||||
struct prueth_emac *emac;
|
||||
int ret = 0;
|
||||
int slice;
|
||||
|
||||
if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
|
||||
return -EINVAL;
|
||||
|
||||
/* clear SMEM and MSMC settings for all slices */
|
||||
memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
|
||||
memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
|
||||
|
||||
icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
|
||||
icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
|
||||
|
||||
if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
|
||||
icssg_init_fw_offload_mode(prueth);
|
||||
else
|
||||
icssg_init_emac_mode(prueth);
|
||||
|
||||
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
|
||||
emac = prueth->emac[slice];
|
||||
if (!emac)
|
||||
continue;
|
||||
ret = icssg_config(prueth, emac, slice);
|
||||
if (ret)
|
||||
goto disable_class;
|
||||
}
|
||||
|
||||
ret = prueth_emac_start(prueth);
|
||||
if (ret)
|
||||
goto disable_class;
|
||||
|
||||
emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
|
||||
prueth->emac[ICSS_SLICE1];
|
||||
ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
|
||||
emac, IEP_DEFAULT_CYCLE_TIME_NS);
|
||||
if (ret) {
|
||||
dev_err(prueth->dev, "Failed to initialize IEP module\n");
|
||||
goto stop_pruss;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
stop_pruss:
|
||||
prueth_emac_stop(prueth);
|
||||
|
||||
disable_class:
|
||||
icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
|
||||
icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int prueth_emac_common_stop(struct prueth *prueth)
|
||||
{
|
||||
struct prueth_emac *emac;
|
||||
|
||||
if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
|
||||
return -EINVAL;
|
||||
|
||||
icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
|
||||
icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
|
||||
|
||||
prueth_emac_stop(prueth);
|
||||
|
||||
emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
|
||||
prueth->emac[ICSS_SLICE1];
|
||||
icss_iep_exit(emac->iep);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* called back by PHY layer if there is change in link state of hw port*/
|
||||
static void emac_adjust_link(struct net_device *ndev)
|
||||
{
|
||||
@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
|
||||
u32 cycletime;
|
||||
int timeout;
|
||||
|
||||
if (!emac->fw_running)
|
||||
return;
|
||||
|
||||
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
|
||||
|
||||
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
|
||||
@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
|
||||
{
|
||||
struct prueth_emac *emac = netdev_priv(ndev);
|
||||
int ret, i, num_data_chn = emac->tx_ch_num;
|
||||
struct icssg_flow_cfg __iomem *flow_cfg;
|
||||
struct prueth *prueth = emac->prueth;
|
||||
int slice = prueth_emac_slice(emac);
|
||||
struct device *dev = prueth->dev;
|
||||
int max_rx_flows;
|
||||
int rx_flow;
|
||||
|
||||
/* clear SMEM and MSMC settings for all slices */
|
||||
if (!prueth->emacs_initialized) {
|
||||
memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
|
||||
memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
|
||||
}
|
||||
|
||||
/* set h/w MAC as user might have re-configured */
|
||||
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
|
||||
|
||||
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
|
||||
icssg_class_default(prueth->miig_rt, slice, 0, false);
|
||||
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
|
||||
|
||||
/* Notify the stack of the actual queue counts. */
|
||||
@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
|
||||
goto cleanup_napi;
|
||||
}
|
||||
|
||||
/* reset and start PRU firmware */
|
||||
ret = prueth_emac_start(prueth, emac);
|
||||
if (ret)
|
||||
goto free_rx_irq;
|
||||
if (!prueth->emacs_initialized) {
|
||||
ret = prueth_emac_common_start(prueth);
|
||||
if (ret)
|
||||
goto free_rx_irq;
|
||||
}
|
||||
|
||||
flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
|
||||
writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
|
||||
ret = emac_fdb_flow_id_updated(emac);
|
||||
|
||||
if (ret) {
|
||||
netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
|
||||
goto stop;
|
||||
}
|
||||
|
||||
icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
|
||||
|
||||
if (!prueth->emacs_initialized) {
|
||||
ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
|
||||
emac, IEP_DEFAULT_CYCLE_TIME_NS);
|
||||
}
|
||||
|
||||
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
|
||||
IRQF_ONESHOT, dev_name(dev), emac);
|
||||
if (ret)
|
||||
@ -653,7 +741,8 @@ reset_rx_chn:
|
||||
free_tx_ts_irq:
|
||||
free_irq(emac->tx_ts_irq, emac);
|
||||
stop:
|
||||
prueth_emac_stop(emac);
|
||||
if (!prueth->emacs_initialized)
|
||||
prueth_emac_common_stop(prueth);
|
||||
free_rx_irq:
|
||||
free_irq(emac->rx_chns.irq[rx_flow], emac);
|
||||
cleanup_napi:
|
||||
@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
|
||||
if (ndev->phydev)
|
||||
phy_stop(ndev->phydev);
|
||||
|
||||
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
|
||||
|
||||
if (emac->prueth->is_hsr_offload_mode)
|
||||
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
|
||||
else
|
||||
@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
|
||||
/* Destroying the queued work in ndo_stop() */
|
||||
cancel_delayed_work_sync(&emac->stats_work);
|
||||
|
||||
if (prueth->emacs_initialized == 1)
|
||||
icss_iep_exit(emac->iep);
|
||||
|
||||
/* stop PRUs */
|
||||
prueth_emac_stop(emac);
|
||||
if (prueth->emacs_initialized == 1)
|
||||
prueth_emac_common_stop(prueth);
|
||||
|
||||
free_irq(emac->tx_ts_irq, emac);
|
||||
|
||||
@ -1053,10 +1138,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
|
||||
}
|
||||
}
|
||||
|
||||
static void prueth_emac_restart(struct prueth *prueth)
|
||||
static int prueth_emac_restart(struct prueth *prueth)
|
||||
{
|
||||
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
|
||||
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
|
||||
int ret;
|
||||
|
||||
/* Detach the net_device for both PRUeth ports*/
|
||||
if (netif_running(emac0->ndev))
|
||||
@ -1065,36 +1151,46 @@ static void prueth_emac_restart(struct prueth *prueth)
|
||||
netif_device_detach(emac1->ndev);
|
||||
|
||||
/* Disable both PRUeth ports */
|
||||
icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
|
||||
icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
|
||||
ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
|
||||
ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Stop both pru cores for both PRUeth ports*/
|
||||
prueth_emac_stop(emac0);
|
||||
prueth->emacs_initialized--;
|
||||
prueth_emac_stop(emac1);
|
||||
prueth->emacs_initialized--;
|
||||
ret = prueth_emac_common_stop(prueth);
|
||||
if (ret) {
|
||||
		dev_err(prueth->dev, "Failed to stop the firmwares");
		return ret;
	}

	/* Start both pru cores for both PRUeth ports */
	prueth_emac_start(prueth, emac0);
	prueth->emacs_initialized++;
	prueth_emac_start(prueth, emac1);
	prueth->emacs_initialized++;
	ret = prueth_emac_common_start(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to start the firmwares");
		return ret;
	}

	/* Enable forwarding for both PRUeth ports */
	icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
	ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
	ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);

	/* Attache net_device for both PRUeth ports */
	netif_device_attach(emac0->ndev);
	netif_device_attach(emac1->ndev);

	return ret;
}

static void icssg_change_mode(struct prueth *prueth)
{
	struct prueth_emac *emac;
	int mac;
	int mac, ret;

	prueth_emac_restart(prueth);
	ret = prueth_emac_restart(prueth);
	if (ret) {
		dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
		return;
	}

	for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
		emac = prueth->emac[mac];
@@ -1173,13 +1269,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	int ret;

	prueth->br_members &= ~BIT(emac->port_id);

	if (prueth->is_switch_mode) {
		prueth->is_switch_mode = false;
		emac->port_vlan = 0;
		prueth_emac_restart(prueth);
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
	}

	prueth_offload_fwd_mark_update(prueth);
@@ -1228,6 +1329,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
	struct prueth *prueth = emac->prueth;
	struct prueth_emac *emac0;
	struct prueth_emac *emac1;
	int ret;

	emac0 = prueth->emac[PRUETH_MAC0];
	emac1 = prueth->emac[PRUETH_MAC1];
@@ -1238,7 +1340,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
		emac0->port_vlan = 0;
		emac1->port_vlan = 0;
		prueth->hsr_dev = NULL;
		prueth_emac_restart(prueth);
		ret = prueth_emac_restart(prueth);
		if (ret) {
			dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
			return;
		}
		netdev_dbg(ndev, "Disabling HSR Offload mode\n");
	}
}
@@ -1413,13 +1519,10 @@ static int prueth_probe(struct platform_device *pdev)
		prueth->pa_stats = NULL;
	}

	if (eth0_node) {
	if (eth0_node || eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
		if (ret)
			goto put_cores;
	}

	if (eth1_node) {
		ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
		if (ret)
			goto put_cores;
@@ -1618,14 +1721,12 @@ put_pruss:
	pruss_put(prueth->pruss);

put_cores:
	if (eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	if (eth0_node) {
	if (eth0_node || eth1_node) {
		prueth_put_cores(prueth, ICSS_SLICE0);
		of_node_put(eth0_node);

		prueth_put_cores(prueth, ICSS_SLICE1);
		of_node_put(eth1_node);
	}

	return ret;
@@ -140,7 +140,6 @@ struct prueth_rx_chn {
/* data for each emac port */
struct prueth_emac {
	bool is_sr1;
	bool fw_running;
	struct prueth *prueth;
	struct net_device *ndev;
	u8 mac_addr[6];
@@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
			 enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
void icssg_init_emac_mode(struct prueth *prueth);
void icssg_init_fw_offload_mode(struct prueth *prueth);

/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
		       u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
	container_of(pnapi, struct prueth_tx_chn, napi_tx)

@@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
		goto halt_pru;
	}

	emac->fw_running = 1;
	return 0;

halt_pru:
@@ -449,6 +448,29 @@ halt_pru:
	return ret;
}

static void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	if (!emac->is_sr1)
		rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}

/**
 * emac_ndo_open - EMAC device open
 * @ndev: network adapter device
@@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
struct kszphy_priv {
	struct kszphy_ptp_priv ptp_priv;
	const struct kszphy_type *type;
	struct clk *clk;
	int led_mode;
	u16 vct_ctrl1000;
	bool rmii_ref_clk_sel;
	bool rmii_ref_clk_sel_val;
	bool clk_enable;
	u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};

@@ -2050,6 +2052,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
		data[i] = kszphy_get_stat(phydev, i);
}

static void kszphy_enable_clk(struct phy_device *phydev)
{
	struct kszphy_priv *priv = phydev->priv;

	if (!priv->clk_enable && priv->clk) {
		clk_prepare_enable(priv->clk);
		priv->clk_enable = true;
	}
}

static void kszphy_disable_clk(struct phy_device *phydev)
{
	struct kszphy_priv *priv = phydev->priv;

	if (priv->clk_enable && priv->clk) {
		clk_disable_unprepare(priv->clk);
		priv->clk_enable = false;
	}
}

static int kszphy_generic_resume(struct phy_device *phydev)
{
	kszphy_enable_clk(phydev);

	return genphy_resume(phydev);
}

static int kszphy_generic_suspend(struct phy_device *phydev)
{
	int ret;

	ret = genphy_suspend(phydev);
	if (ret)
		return ret;

	kszphy_disable_clk(phydev);

	return 0;
}

static int kszphy_suspend(struct phy_device *phydev)
{
	/* Disable PHY Interrupts */
@@ -2059,7 +2101,7 @@ static int kszphy_suspend(struct phy_device *phydev)
		phydev->drv->config_intr(phydev);
	}

	return genphy_suspend(phydev);
	return kszphy_generic_suspend(phydev);
}

static void kszphy_parse_led_mode(struct phy_device *phydev)
@@ -2090,7 +2132,9 @@ static int kszphy_resume(struct phy_device *phydev)
{
	int ret;

	genphy_resume(phydev);
	ret = kszphy_generic_resume(phydev);
	if (ret)
		return ret;

	/* After switching from power-down to normal mode, an internal global
	 * reset is automatically generated. Wait a minimum of 1 ms before
@@ -2112,6 +2156,24 @@ static int kszphy_resume(struct phy_device *phydev)
	return 0;
}

/* Because of errata DS80000700A, receiver error following software
 * power down. Suspend and resume callbacks only disable and enable
 * external rmii reference clock.
 */
static int ksz8041_resume(struct phy_device *phydev)
{
	kszphy_enable_clk(phydev);

	return 0;
}

static int ksz8041_suspend(struct phy_device *phydev)
{
	kszphy_disable_clk(phydev);

	return 0;
}

static int ksz9477_resume(struct phy_device *phydev)
{
	int ret;
@@ -2159,7 +2221,10 @@ static int ksz8061_resume(struct phy_device *phydev)
	if (!(ret & BMCR_PDOWN))
		return 0;

	genphy_resume(phydev);
	ret = kszphy_generic_resume(phydev);
	if (ret)
		return ret;

	usleep_range(1000, 2000);

	/* Re-program the value after chip is reset. */
@@ -2177,6 +2242,11 @@ static int ksz8061_resume(struct phy_device *phydev)
	return 0;
}

static int ksz8061_suspend(struct phy_device *phydev)
{
	return kszphy_suspend(phydev);
}

static int kszphy_probe(struct phy_device *phydev)
{
	const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2217,10 +2287,14 @@ static int kszphy_probe(struct phy_device *phydev)
	} else if (!clk) {
		/* unnamed clock from the generic ethernet-phy binding */
		clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_disable_unprepare(clk);
	priv->clk = clk;

	if (ksz8041_fiber_mode(phydev))
		phydev->port = PORT_FIBRE;

@@ -5290,6 +5364,21 @@ static int lan8841_probe(struct phy_device *phydev)
	return 0;
}

static int lan8804_resume(struct phy_device *phydev)
{
	return kszphy_resume(phydev);
}

static int lan8804_suspend(struct phy_device *phydev)
{
	return kszphy_generic_suspend(phydev);
}

static int lan8841_resume(struct phy_device *phydev)
{
	return kszphy_generic_resume(phydev);
}

static int lan8841_suspend(struct phy_device *phydev)
{
	struct kszphy_priv *priv = phydev->priv;
@@ -5298,7 +5387,7 @@ static int lan8841_suspend(struct phy_device *phydev)
	if (ptp_priv->ptp_clock)
		ptp_cancel_worker_sync(ptp_priv->ptp_clock);

	return genphy_suspend(phydev);
	return kszphy_generic_suspend(phydev);
}

static struct phy_driver ksphy_driver[] = {
@@ -5358,9 +5447,8 @@ static struct phy_driver ksphy_driver[] = {
	.get_sset_count = kszphy_get_sset_count,
	.get_strings = kszphy_get_strings,
	.get_stats = kszphy_get_stats,
	/* No suspend/resume callbacks because of errata DS80000700A,
	 * receiver error following software power down.
	 */
	.suspend = ksz8041_suspend,
	.resume = ksz8041_resume,
}, {
	.phy_id = PHY_ID_KSZ8041RNLI,
	.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -5436,7 +5524,7 @@ static struct phy_driver ksphy_driver[] = {
	.soft_reset = genphy_soft_reset,
	.config_intr = kszphy_config_intr,
	.handle_interrupt = kszphy_handle_interrupt,
	.suspend = kszphy_suspend,
	.suspend = ksz8061_suspend,
	.resume = ksz8061_resume,
}, {
	.phy_id = PHY_ID_KSZ9021,
@@ -5507,8 +5595,8 @@ static struct phy_driver ksphy_driver[] = {
	.get_sset_count = kszphy_get_sset_count,
	.get_strings = kszphy_get_strings,
	.get_stats = kszphy_get_stats,
	.suspend = genphy_suspend,
	.resume = kszphy_resume,
	.suspend = lan8804_suspend,
	.resume = lan8804_resume,
	.config_intr = lan8804_config_intr,
	.handle_interrupt = lan8804_handle_interrupt,
}, {
@@ -5526,7 +5614,7 @@ static struct phy_driver ksphy_driver[] = {
	.get_strings = kszphy_get_strings,
	.get_stats = kszphy_get_stats,
	.suspend = lan8841_suspend,
	.resume = genphy_resume,
	.resume = lan8841_resume,
	.cable_test_start = lan8814_cable_test_start,
	.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
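
Side note, not part of the patch: the kszphy_enable_clk()/kszphy_disable_clk() helpers above gate clk_prepare_enable()/clk_disable_unprepare() behind the clk_enable flag, so repeated suspend or resume calls can no longer unbalance the clock reference count. A minimal standalone sketch of that idempotent-guard idea, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct clk_guard {
	bool enabled;		/* mirrors priv->clk_enable in the driver */
	int prepare_count;	/* stands in for the real clock framework */
};

static void guard_enable(struct clk_guard *g)
{
	if (!g->enabled) {		/* only act on a real state change */
		g->prepare_count++;	/* clk_prepare_enable() stand-in */
		g->enabled = true;
	}
}

static void guard_disable(struct clk_guard *g)
{
	if (g->enabled) {
		g->prepare_count--;	/* clk_disable_unprepare() stand-in */
		g->enabled = false;
	}
}

int main(void)
{
	struct clk_guard g = { 0 };

	guard_enable(&g);
	guard_enable(&g);	/* double resume: no second prepare */
	guard_disable(&g);
	guard_disable(&g);	/* double suspend: count never goes negative */
	printf("prepare_count=%d\n", g.prepare_count);	/* stays balanced at 0 */
	return 0;
}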
@@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
	if (id >= TPS23881_MAX_CHANS)
		return -ERANGE;

	ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
	if (ret < 0)
		return ret;

	chan = priv->port[id].chan[0];
	if (chan < 4)
		val = (u16)(ret | BIT(chan));
		val = BIT(chan);
	else
		val = (u16)(ret | BIT(chan + 4));
		val = BIT(chan + 4);

	if (priv->port[id].is_4p) {
		chan = priv->port[id].chan[1];
@@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
	if (id >= TPS23881_MAX_CHANS)
		return -ERANGE;

	ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
	if (ret < 0)
		return ret;

	chan = priv->port[id].chan[0];
	if (chan < 4)
		val = (u16)(ret | BIT(chan + 4));
		val = BIT(chan + 4);
	else
		val = (u16)(ret | BIT(chan + 8));
		val = BIT(chan + 8);

	if (priv->port[id].is_4p) {
		chan = priv->port[id].chan[1];
@@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {

const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";

@@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
extern const char iwl_wh_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];

@@ -2954,6 +2954,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
				    int idx)
{
	int i;
	int n_channels = 0;

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
@@ -2962,7 +2963,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,

		for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
			if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
				match->channels[match->n_channels++] =
				match->channels[n_channels++] =
					mvm->nd_channels[i]->center_freq;
	} else {
		struct iwl_scan_offload_profile_match_v1 *matches =
@@ -2970,9 +2971,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,

		for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
			if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
				match->channels[match->n_channels++] =
				match->channels[n_channels++] =
					mvm->nd_channels[i]->center_freq;
	}
	/* We may have ended up with fewer channels than we allocated. */
	match->n_channels = n_channels;
}

/**
@@ -3053,6 +3056,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;
	net_detect->n_matches = n_matches;
	n_matches = 0;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct cfg80211_wowlan_nd_match *match;
@@ -3066,8 +3071,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;
		match->n_channels = n_channels;

		net_detect->matches[net_detect->n_matches++] = match;
		net_detect->matches[n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
@@ -3082,6 +3088,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,

		iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
	}
	/* We may have fewer matches than we allocated. */
	net_detect->n_matches = n_matches;

out_report_nd:
	wakeup.net_detect = net_detect;
@@ -1106,18 +1106,53 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),

	/* Bz */
/* FIXME: need to change the naming according to the actual CRF */
	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_ax201_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_ax211_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_fm_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_wh_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_ax201_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_ax211_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_fm_name),

	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
		      iwl_cfg_bz, iwl_fm_name),
		      iwl_cfg_bz, iwl_wh_name),

	/* Ga (Gl) */
	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -442,8 +442,8 @@ static void cw1200_spi_disconnect(struct spi_device *func)
			cw1200_core_release(self->core);
			self->core = NULL;
		}
		cw1200_spi_off(self, dev_get_platdata(&func->dev));
	}
	cw1200_spi_off(self, dev_get_platdata(&func->dev));
}

static int __maybe_unused cw1200_spi_suspend(struct device *dev)
@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
			break;

		msleep(20);
	} while (retries-- > 0);
	} while (--retries > 0);

	if (!retries) {
		dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
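
As an aside (not from the patch): the one-character change matters because the post-decrement form leaves retries at -1 once the poll loop gives up, so the following if (!retries) never reports the timeout; the pre-decrement form exits with retries == 0. A standalone sketch of the two loop forms:

#include <stdio.h>

/* Pretend the polled condition never becomes true; compare how each loop exits. */
int main(void)
{
	int retries;

	retries = 3;
	do {
		/* poll would happen here */
	} while (retries-- > 0);
	/* post-decrement: loop exits with retries == -1, so !retries is false */
	printf("post-decrement: retries=%d, timeout detected=%s\n",
	       retries, !retries ? "yes" : "no");

	retries = 3;
	do {
		/* poll would happen here */
	} while (--retries > 0);
	/* pre-decrement: loop exits with retries == 0, so !retries is true */
	printf("pre-decrement:  retries=%d, timeout detected=%s\n",
	       retries, !retries ? "yes" : "no");
	return 0;
}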
@@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
	fsm_state_notify(ctl->md, state);
}

static void fsm_release_command(struct kref *ref)
{
	struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);

	kfree(cmd);
}

static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
		cmd->result = result;
		complete_all(&cmd->done);
	}

	kfree(cmd);
	kref_put(&cmd->refcnt, fsm_release_command);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
@@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)

int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;
@@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	kref_init(&cmd->refcnt);
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
		init_completion(&cmd->done);
		kref_get(&cmd->refcnt);
	}

	kref_get(&cmd->refcnt);
	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);
@@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
		wait_ret = wait_for_completion_timeout(&cmd->done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		ret = wait_ret ? cmd->result : -ETIMEDOUT;
		kref_put(&cmd->refcnt, fsm_release_command);
		return ret;
	}

@@ -110,8 +110,9 @@ struct t7xx_fsm_command {
	struct list_head entry;
	enum t7xx_fsm_cmd_state cmd_id;
	unsigned int flag;
	struct completion *done;
	int *ret;
	struct completion done;
	int result;
	struct kref refcnt;
};

struct t7xx_fsm_notifier {
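
Not part of the patch, only an illustration: moving the completion and result into the command and protecting the object with a kref means the submitter and the FSM thread each hold their own reference, and whichever side drops the last one frees the memory. A timed-out waiter can therefore no longer race with fsm_finish_command() writing through pointers into a freed (or stack-expired) object. A rough standalone sketch of that shared-ownership idea using C11 atomics, with made-up names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	atomic_int refcnt;	/* plays the role of cmd->refcnt (kref) */
	int result;		/* plays the role of cmd->result */
};

static struct cmd *cmd_alloc(void)
{
	struct cmd *c = calloc(1, sizeof(*c));

	atomic_init(&c->refcnt, 1);	/* submitter's reference */
	return c;
}

static void cmd_get(struct cmd *c)
{
	atomic_fetch_add(&c->refcnt, 1);
}

static void cmd_put(struct cmd *c)
{
	/* last reference frees the object, like kref_put() */
	if (atomic_fetch_sub(&c->refcnt, 1) == 1)
		free(c);
}

int main(void)
{
	struct cmd *c = cmd_alloc();

	cmd_get(c);		/* worker takes its own reference */
	c->result = -1;		/* worker finishes the command */
	cmd_put(c);		/* worker drops its reference */
	printf("result=%d\n", c->result);	/* submitter still holds one, so this is safe */
	cmd_put(c);		/* submitter drops the last reference; memory is freed */
	return 0;
}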
@@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @mac_offset: MAC offset
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
						__be16 type,
						int mac_offset,
						int *depth)
{
	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;

@@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
	do {
		struct vlan_hdr vhdr, *vh;

		vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
		vh = skb_header_pointer(skb, mac_offset + vlan_depth,
					sizeof(vhdr), &vhdr);
		if (unlikely(!vh || !--parse_depth))
			return 0;

@@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
	return type;
}

static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
{
	return __vlan_get_protocol_offset(skb, type, 0, depth);
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
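
For context (not from the patch): __vlan_get_protocol_offset() only adds a mac_offset to every skb_header_pointer() lookup, which lets callers that have not pulled the MAC header — such as the af_packet changes further down — walk the VLAN stack without touching skb->data. A standalone sketch of the same walk over a raw frame buffer; the helper name and constants here are illustrative, not the kernel API:

#include <arpa/inet.h>	/* ntohs() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* Walk 802.1Q/802.1ad tags starting at mac_offset, return the inner EtherType. */
static uint16_t vlan_get_protocol_at(const uint8_t *buf, size_t len,
				     size_t mac_offset)
{
	size_t off = mac_offset + 12;	/* skip destination and source MAC */
	uint16_t type;
	int depth = 8;			/* cap nesting, like VLAN_MAX_DEPTH */

	for (;;) {
		if (off + 2 > len || depth-- == 0)
			return 0;
		memcpy(&type, buf + off, 2);
		type = ntohs(type);
		off += 2;
		if (type == ETH_P_8021Q || type == ETH_P_8021AD)
			off += 2;	/* skip the TCI, look at the next type */
		else
			return type;
	}
}

int main(void)
{
	/* dst MAC, src MAC, one 802.1Q tag (TCI 0x0005), then IPv4 (0x0800) */
	uint8_t frame[] = { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2,
			    0x81, 0x00, 0x00, 0x05, 0x08, 0x00 };

	printf("inner EtherType: 0x%04x\n",
	       vlan_get_protocol_at(frame, sizeof(frame), 0));	/* prints 0x0800 */
	return 0;
}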
@@ -524,6 +524,7 @@ enum {
	 * creation/deletion on drivers rescan. Unset during device attach.
	 */
	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
	MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
};

struct mlx5_adev {
@@ -733,15 +733,18 @@ struct nft_set_ext_tmpl {
/**
 * struct nft_set_ext - set extensions
 *
 * @genmask: generation mask
 * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
 * @offset: offsets of individual extension types
 * @data: beginning of extension data
 *
 * This structure must be aligned to word size, otherwise atomic bitops
 * on genmask field can cause alignment failure on some archs.
 */
struct nft_set_ext {
	u8 genmask;
	u8 offset[NFT_SET_EXT_NUM];
	char data[];
};
} __aligned(BITS_PER_LONG / 8);

static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
{
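
Aside, not from the patch: the point of __aligned(BITS_PER_LONG / 8) is that atomic bit operations on genmask touch a whole machine word, so the extension header must start on a word boundary. Roughly the same requirement can be expressed and checked in plain C11 like this (illustrative only):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_EXT 8

/* Word-aligned so that word-wide atomic bitops on genmask are always legal. */
struct set_ext {
	alignas(sizeof(long)) uint8_t genmask;
	uint8_t offset[NUM_EXT];
	char data[];		/* extension data starts right after the header */
};

/* Compile-time check, the moral equivalent of __aligned(BITS_PER_LONG / 8). */
_Static_assert(alignof(struct set_ext) == sizeof(long),
	       "set_ext must be word aligned");

int main(void)
{
	printf("sizeof=%zu alignof=%zu\n",
	       sizeof(struct set_ext), alignof(struct set_ext));
	return 0;
}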
@@ -12,31 +12,33 @@
/**
 * enum mptcp_event_type
 * @MPTCP_EVENT_UNSPEC: unused event
 * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
 * sport, dport A new MPTCP connection has been created. It is the good time
 * to allocate memory and send ADD_ADDR if needed. Depending on the
 * @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is the
 * good time to allocate memory and send ADD_ADDR if needed. Depending on the
 * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
 * is sent.
 * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
 * sport, dport A MPTCP connection is established (can start new subflows).
 * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped.
 * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
 * new address has been announced by the peer.
 * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
 * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
 * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
 * subflow has been established. 'error' should not be set.
 * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
 * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
 * closed. An error (copy of sk_err) could be set if an error has been
 * detected for this subflow.
 * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
 * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
 * subflow has changed. 'error' should not be set.
 * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
 * listener is created.
 * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
 * is closed.
 * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
 * sport, dport, server-side.
 * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
 * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
 * sport, dport, server-side.
 * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
 * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
 * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
 * @MPTCP_EVENT_REMOVED: An address has been lost by the peer. Attributes:
 * token, rem_id.
 * @MPTCP_EVENT_SUB_ESTABLISHED: A new subflow has been established. 'error'
 * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
 * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error].
 * @MPTCP_EVENT_SUB_CLOSED: A subflow has been closed. An error (copy of
 * sk_err) could be set if an error has been detected for this subflow.
 * Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
 * daddr6, sport, dport, backup, if_idx [, error].
 * @MPTCP_EVENT_SUB_PRIORITY: The priority of a subflow has changed. 'error'
 * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
 * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error].
 * @MPTCP_EVENT_LISTENER_CREATED: A new PM listener is created. Attributes:
 * family, sport, saddr4 | saddr6.
 * @MPTCP_EVENT_LISTENER_CLOSED: A PM listener is closed. Attributes: family,
 * sport, saddr4 | saddr6.
 */
enum mptcp_event_type {
	MPTCP_EVENT_UNSPEC,
@@ -3642,8 +3642,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
		    skb_network_header_len(skb) != sizeof(struct ipv6hdr))
		    skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
		    !ipv6_has_hopopt_jumbo(skb))
			goto sw_checksum;

		switch (skb->csum_offset) {
		case offsetof(struct tcphdr, check):
		case offsetof(struct udphdr, check):
@@ -246,8 +246,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
	rcu_read_unlock();
	rtnl_unlock();

	if (err)
	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);
@@ -1295,7 +1295,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		if (valbool && !sk_is_inet(sk))
			ret = -EOPNOTSUPP;
		else
			sk->sk_reuseport = valbool;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
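
A quick illustration, not from the patch: with this change, enabling SO_REUSEPORT is only accepted on IPv4/IPv6 sockets; other families get EOPNOTSUPP instead of silently carrying a flag that only the inet lookup code honours. A small userspace probe of that behaviour (the AF_UNIX result assumes a kernel that carries this fix):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void try_reuseport(int family, const char *name)
{
	int one = 1;
	int fd = socket(family, SOCK_STREAM, 0);
	int ret;

	if (fd < 0)
		return;
	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	printf("%s: setsockopt(SO_REUSEPORT) -> %s\n",
	       name, ret ? strerror(errno) : "ok");
	close(fd);
}

int main(void)
{
	try_reuseport(AF_INET, "AF_INET");	/* still allowed */
	/* On kernels with this fix, enabling it here fails with EOPNOTSUPP. */
	try_reuseport(AF_UNIX, "AF_UNIX");
	return 0;
}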
@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)

		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
				    iph->saddr, tunnel->parms.o_key,
				    iph->tos & INET_DSCP_MASK, dev_net(dev),
				    iph->tos & INET_DSCP_MASK, tunnel->net,
				    tunnel->parms.link, tunnel->fwmark, 0, 0);
		rt = ip_route_output_key(tunnel->net, &fl4);

@@ -611,7 +611,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
	}
	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
			    tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
			    skb_get_hash(skb), key->flow_flags);

	if (!tunnel_hlen)
@@ -774,7 +774,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,

	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
			    tunnel->parms.o_key, tos & INET_DSCP_MASK,
			    dev_net(dev), READ_ONCE(tunnel->parms.link),
			    tunnel->net, READ_ONCE(tunnel->parms.link),
			    tunnel->fwmark, skb_get_hash(skb), 0);

	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
@@ -7328,6 +7328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
		if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
							    req->timeout))) {
			reqsk_free(req);
			dst_release(dst);
			return 0;
		}
@@ -195,6 +195,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
	},
};

static DEFINE_MUTEX(ila_mutex);

static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -202,16 +204,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!ilan->xlat.hooks_registered) {
	if (!READ_ONCE(ilan->xlat.hooks_registered)) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		err = nf_register_net_hooks(net, ila_nf_hook_ops,
					    ARRAY_SIZE(ila_nf_hook_ops));
		mutex_lock(&ila_mutex);
		if (!ilan->xlat.hooks_registered) {
			err = nf_register_net_hooks(net, ila_nf_hook_ops,
						    ARRAY_SIZE(ila_nf_hook_ops));
			if (!err)
				WRITE_ONCE(ilan->xlat.hooks_registered, true);
		}
		mutex_unlock(&ila_mutex);
		if (err)
			return err;

		ilan->xlat.hooks_registered = true;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
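
For context, not part of the patch: the registration now follows the classic check / lock / re-check pattern. The unlocked READ_ONCE() keeps the common case cheap, and the second test under ila_mutex guarantees nf_register_net_hooks() runs at most once even when several mappings are added concurrently. A standalone pthreads sketch of the pattern, with made-up names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool hooks_registered;
static int register_calls;	/* must end up at exactly 1 */

static void register_hooks_once(void)
{
	if (!atomic_load(&hooks_registered)) {		/* cheap unlocked check */
		pthread_mutex_lock(&reg_lock);
		if (!atomic_load(&hooks_registered)) {	/* re-check under the lock */
			register_calls++;		/* nf_register_net_hooks() stand-in */
			atomic_store(&hooks_registered, true);
		}
		pthread_mutex_unlock(&reg_lock);
	}
}

static void *add_mapping(void *arg)
{
	(void)arg;
	register_hooks_once();
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, add_mapping, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("register_calls=%d\n", register_calls);	/* always 1 */
	return 0;
}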
@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
	if (unlikely(!pskb_may_pull(skb, llc_len)))
		return 0;

	skb->transport_header += llc_len;
	skb_pull(skb, llc_len);
	skb_reset_transport_header(skb);
	if (skb->protocol == htons(ETH_P_802_2)) {
		__be16 pdulen;
		s32 data_size;
@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
					     &echo, &drop_other_suboptions))
		return false;

	/*
	 * Later on, mptcp_write_options() will enforce mutually exclusion with
	 * DSS, bail out if such option is set and we can't drop it.
	 */
	if (drop_other_suboptions)
		remaining += opt_size;
	else if (opts->suboptions & OPTION_MPTCP_DSS)
		return false;

	len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
	if (remaining < len)
		return false;
@@ -136,6 +136,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

@@ -528,13 +529,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}

static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, 1);
		tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}

@@ -551,7 +552,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
			(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}

static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
@@ -559,14 +560,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1));
	rx_empty = !__mptcp_rmem(sk);
	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
	rx_empty = !__mptcp_rmem(sk) && copied;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk);
			mptcp_subflow_cleanup_rbuf(ssk, copied);
	}
}

@@ -1939,6 +1940,8 @@ do_error:
	goto out;
}

static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len, int flags,
@@ -1992,6 +1995,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
			break;
	}

	mptcp_rcv_space_adjust(msk, copied);
	return copied;
}

@@ -2217,9 +2221,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,

		copied += bytes_read;

		/* be sure to advertise window change */
		mptcp_cleanup_rbuf(msk);

		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
			continue;

@@ -2268,7 +2269,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		}

		pr_debug("block timeout %ld\n", timeo);
		mptcp_rcv_space_adjust(msk, copied);
		mptcp_cleanup_rbuf(msk, copied);
		err = sk_wait_data(sk, &timeo, NULL);
		if (err < 0) {
			err = copied ? : err;
@@ -2276,7 +2277,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		}
	}

	mptcp_rcv_space_adjust(msk, copied);
	mptcp_cleanup_rbuf(msk, copied);

out_err:
	if (cmsg_flags && copied >= 0) {
@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
	int ret;
	struct sk_buff *skbn;

	/*
	 * Reject malformed packets early. Check that it contains at least 2
	 * addresses and 1 byte more for Time-To-Live
	 */
	if (skb->len < 2 * sizeof(ax25_address) + 1)
		return 0;

	nr_src = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);
@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
	return packet_lookup_frame(po, rb, rb->head, status);
}

static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
{
	u8 *skb_orig_data = skb->data;
	int skb_orig_len = skb->len;
	struct vlan_hdr vhdr, *vh;
	unsigned int header_len;

@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
	else
		return 0;

	skb_push(skb, skb->data - skb_mac_header(skb));
	vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
	if (skb_orig_data != skb->data) {
		skb->data = skb_orig_data;
		skb->len = skb_orig_len;
	}
	vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
				sizeof(vhdr), &vhdr);
	if (unlikely(!vh))
		return 0;

	return ntohs(vh->h_vlan_TCI);
}

static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
{
	__be16 proto = skb->protocol;

	if (unlikely(eth_type_vlan(proto))) {
		u8 *skb_orig_data = skb->data;
		int skb_orig_len = skb->len;

		skb_push(skb, skb->data - skb_mac_header(skb));
		proto = __vlan_get_protocol(skb, proto, NULL);
		if (skb_orig_data != skb->data) {
			skb->data = skb_orig_data;
			skb->len = skb_orig_len;
		}
	}
	if (unlikely(eth_type_vlan(proto)))
		proto = __vlan_get_protocol_offset(skb, proto,
						   skb_mac_offset(skb), NULL);

	return proto;
}
@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		(unsigned long)sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
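
Brief illustration, not from the patch: the cast matters because sp->autoclose and HZ are both 32-bit, so the product is computed in 32 bits and wraps before it is widened for the jiffies field; promoting one operand to unsigned long first keeps the whole multiply in the wider type. A minimal standalone demonstration (assumes 64-bit unsigned long; the HZ value is made up):

#include <stdio.h>

#define HZ 1000

int main(void)
{
	int autoclose = 5000000;	/* seconds; large but plausible user input */

	/* the 32-bit multiply wraps before the result is widened */
	unsigned long wrapped = (unsigned int)autoclose * HZ;
	/* widening one operand first keeps the whole multiply in unsigned long */
	unsigned long correct = (unsigned long)autoclose * HZ;

	printf("wrapped=%lu correct=%lu\n", wrapped, correct);
	return 0;
}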
@@ -1,10 +1,12 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0

from lib.py import ksft_run, ksft_exit, ksft_eq, KsftSkipEx
from lib.py import EthtoolFamily, NetdevFamily
from lib.py import ksft_disruptive, ksft_exit, ksft_run
from lib.py import ksft_eq, ksft_raises, KsftSkipEx
from lib.py import EthtoolFamily, NetdevFamily, NlError
from lib.py import NetDrvEnv
from lib.py import cmd
from lib.py import cmd, defer, ip
import errno
import glob

@@ -59,9 +61,27 @@ def addremove_queues(cfg, nl) -> None:
    ksft_eq(queues, expected)


@ksft_disruptive
def check_down(cfg, nl) -> None:
    # Check the NAPI IDs before interface goes down and hides them
    napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)

    ip(f"link set dev {cfg.dev['ifname']} down")
    defer(ip, f"link set dev {cfg.dev['ifname']} up")

    with ksft_raises(NlError) as cm:
        nl.queue_get({'ifindex': cfg.ifindex, 'id': 0, 'type': 'rx'})
    ksft_eq(cm.exception.nl_msg.error, -errno.ENOENT)

    if napis:
        with ksft_raises(NlError) as cm:
            nl.napi_get({'id': napis[0]['id']})
        ksft_eq(cm.exception.nl_msg.error, -errno.ENOENT)


def main() -> None:
    with NetDrvEnv(__file__, queue_count=100) as cfg:
        ksft_run([get_queues, addremove_queues], args=(cfg, NetdevFamily()))
        ksft_run([get_queues, addremove_queues, check_down], args=(cfg, NetdevFamily()))
    ksft_exit()
@@ -7,7 +7,6 @@ ALL_TESTS="standalone vlan_unaware_bridge vlan_aware_bridge test_vlan \
NUM_NETIFS=2
PING_COUNT=1
REQUIRE_MTOOLS=yes
REQUIRE_MZ=no

source lib.sh