Merge tag 'net-6.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can, xfrm, bluetooth and netfilter.

  Current release - regressions:

   - ipv6: fix RCU splat in ipv6_route_seq_show()

   - wifi: iwlwifi: disable RFI feature

  Previous releases - regressions:

   - tcp: fix possible sk_priority leak in tcp_v4_send_reset()

   - tipc: do not update mtu if msg_max is too small in mtu negotiation

   - netfilter: fix null deref on element insertion

   - devlink: change per-devlink netdev notifier to static one

   - phylink: fix ksettings_set() ethtool call

   - wifi: mac80211: fortify the spinlock against deadlock by interrupt

   - wifi: brcmfmac: check for probe() id argument being NULL

   - eth: ice:
      - fix undersized tx_flags variable
      - fix ice VF reset during iavf initialization

   - eth: hns3: fix sending pfc frames after reset issue

  Previous releases - always broken:

   - xfrm: release all offloaded policy memory

   - nsh: use correct mac_offset to unwind gso skb in nsh_gso_segment()

   - vsock: avoid to close connected socket after the timeout

   - dsa: rzn1-a5psw: enable management frames for CPU port

   - eth: virtio_net: fix error unwinding of XDP initialization

   - eth: tun: fix memory leak for detached NAPI queue.

  Misc:

   - MAINTAINERS: sctp: move Neil to CREDITS"

* tag 'net-6.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (107 commits)
  MAINTAINERS: skip CCing netdev for Bluetooth patches
  mdio_bus: unhide mdio_bus_init prototype
  bridge: always declare tunnel functions
  atm: hide unused procfs functions
  net: isa: include net/Space.h
  Revert "ARM: dts: stm32: add CAN support on stm32f746"
  netfilter: nft_set_rbtree: fix null deref on element insertion
  netfilter: nf_tables: fix nft_trans type confusion
  netfilter: conntrack: define variables exp_nat_nla_policy and any_addr with CONFIG_NF_NAT
  net: wwan: t7xx: Ensure init is completed before system sleep
  net: selftests: Fix optstring
  net: pcs: xpcs: fix C73 AN not getting enabled
  net: wwan: iosm: fix NULL pointer dereference when removing device
  vlan: fix a potential uninit-value in vlan_dev_hard_start_xmit()
  mailmap: add entries for Nikolay Aleksandrov
  igb: fix bit_shift to be in [1..8] range
  net: dsa: mv88e6xxx: Fix mv88e6393x EPC write command offset
  cassini: Fix a memory leak in the error handling path of cas_init_one()
  tun: Fix memory leak for detached NAPI queue.
  can: kvaser_pciefd: Disable interrupts in probe error path
  ...
commit 1f594fe7c9 by Linus Torvalds, 2023-05-18 08:52:14 -07:00
123 changed files with 878 additions and 455 deletions


@ -364,6 +364,11 @@ Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.de>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.com>
Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Nikolay Aleksandrov <razor@blackwall.org> <naleksan@redhat.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com>
Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>


@ -1706,6 +1706,10 @@ S: Panoramastrasse 18
S: D-69126 Heidelberg
S: Germany
N: Neil Horman
M: nhorman@tuxdriver.com
D: SCTP protocol maintainer.
N: Simon Horman
M: horms@verge.net.au
D: Renesas ARM/ARM64 SoC maintainer


@ -21,11 +21,22 @@ properties:
st,can-primary:
description:
Primary and secondary mode of the bxCAN peripheral is only relevant
if the chip has two CAN peripherals. In that case they share some
of the required logic.
Primary mode of the bxCAN peripheral is only relevant if the chip has
two CAN peripherals in dual CAN configuration. In that case they share
some of the required logic.
Not to be used if the peripheral is in single CAN configuration.
To avoid misunderstandings, it should be noted that ST documentation
uses the terms master/slave instead of primary/secondary.
uses the terms master instead of primary.
type: boolean
st,can-secondary:
description:
Secondary mode of the bxCAN peripheral is only relevant if the chip
has two CAN peripherals in dual CAN configuration. In that case they
share some of the required logic.
Not to be used if the peripheral is in single CAN configuration.
To avoid misunderstandings, it should be noted that ST documentation
uses the terms slave instead of secondary.
type: boolean
reg:


@ -6211,6 +6211,7 @@ X: Documentation/devicetree/
X: Documentation/driver-api/media/
X: Documentation/firmware-guide/acpi/
X: Documentation/i2c/
X: Documentation/netlink/
X: Documentation/power/
X: Documentation/spi/
X: Documentation/userspace-api/media/
@ -14566,6 +14567,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
F: Documentation/devicetree/bindings/net/
F: drivers/connector/
F: drivers/net/
X: drivers/net/wireless/
F: include/dt-bindings/net/
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h
@ -14615,6 +14617,7 @@ B: mailto:netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
F: Documentation/core-api/netlink.rst
F: Documentation/netlink/
F: Documentation/networking/
F: Documentation/process/maintainer-netdev.rst
F: Documentation/userspace-api/netlink/
@ -14629,6 +14632,7 @@ F: include/uapi/linux/netdevice.h
F: lib/net_utils.c
F: lib/random32.c
F: net/
X: net/bluetooth/
F: tools/net/
F: tools/testing/selftests/net/
@ -18842,12 +18846,11 @@ F: drivers/target/
F: include/target/
SCTP PROTOCOL
M: Neil Horman <nhorman@tuxdriver.com>
M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
M: Xin Long <lucien.xin@gmail.com>
L: linux-sctp@vger.kernel.org
S: Maintained
W: http://lksctp.sourceforge.net
W: https://github.com/sctp/lksctp-tools/wiki
F: Documentation/networking/sctp.rst
F: include/linux/sctp.h
F: include/net/sctp/


@ -387,6 +387,7 @@ can2: can@40006800 {
interrupt-names = "tx", "rx0", "rx1", "sce";
resets = <&rcc STM32F4_APB1_RESET(CAN2)>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>;
st,can-secondary;
st,gcan = <&gcan>;
status = "disabled";
};


@ -283,6 +283,88 @@ pins2 {
slew-rate = <2>;
};
};
can1_pins_a: can1-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
};
pins2 {
pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
bias-pull-up;
};
};
can1_pins_b: can1-1 {
pins1 {
pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
};
pins2 {
pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
bias-pull-up;
};
};
can1_pins_c: can1-2 {
pins1 {
pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
};
pins2 {
pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
bias-pull-up;
};
};
can1_pins_d: can1-3 {
pins1 {
pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
};
pins2 {
pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
bias-pull-up;
};
};
can2_pins_a: can2-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
};
pins2 {
pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
bias-pull-up;
};
};
can2_pins_b: can2-1 {
pins1 {
pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
};
pins2 {
pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
bias-pull-up;
};
};
can3_pins_a: can3-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
};
pins2 {
pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
bias-pull-up;
};
};
can3_pins_b: can3-1 {
pins1 {
pinmux = <STM32_PINMUX('B', 4, AF11)>; /* CAN3_TX */
};
pins2 {
pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
bias-pull-up;
};
};
};
};
};


@ -95,7 +95,7 @@ config CAN_AT91
config CAN_BXCAN
tristate "STM32 Basic Extended CAN (bxCAN) devices"
depends on OF || ARCH_STM32 || COMPILE_TEST
depends on ARCH_STM32 || COMPILE_TEST
depends on HAS_IOMEM
select CAN_RX_OFFLOAD
help


@ -118,7 +118,7 @@
#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)
#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
/* Filter primary register (FMR) bits */
#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
@ -135,6 +135,12 @@ enum bxcan_lec_code {
BXCAN_LEC_UNUSED
};
enum bxcan_cfg {
BXCAN_CFG_SINGLE = 0,
BXCAN_CFG_DUAL_PRIMARY,
BXCAN_CFG_DUAL_SECONDARY
};
/* Structure of the message buffer */
struct bxcan_mb {
u32 id; /* can identifier */
@ -167,7 +173,7 @@ struct bxcan_priv {
struct regmap *gcan;
int tx_irq;
int sce_irq;
bool primary;
enum bxcan_cfg cfg;
struct clk *clk;
spinlock_t rmw_lock; /* lock for read-modify-write operations */
unsigned int tx_head;
@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
{
unsigned int fid = BXCAN_FILTER_ID(primary);
unsigned int fid = BXCAN_FILTER_ID(cfg);
u32 fmask = BIT(fid);
regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
}
static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
{
unsigned int fid = BXCAN_FILTER_ID(primary);
unsigned int fid = BXCAN_FILTER_ID(cfg);
u32 fmask = BIT(fid);
/* Filter settings:
@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev)
BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
BXCAN_BTR_SJW_MASK, set);
bxcan_enable_filters(priv, priv->primary);
bxcan_enable_filters(priv, priv->cfg);
/* Clear all internal status */
priv->tx_head = 0;
@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev)
BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
bxcan_disable_filters(priv, priv->primary);
bxcan_disable_filters(priv, priv->cfg);
bxcan_enter_sleep_mode(priv);
priv->can.state = CAN_STATE_STOPPED;
}
@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev)
struct clk *clk = NULL;
void __iomem *regs;
struct regmap *gcan;
bool primary;
enum bxcan_cfg cfg;
int err, rx_irq, tx_irq, sce_irq;
regs = devm_platform_ioremap_resource(pdev, 0);
@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev)
return PTR_ERR(gcan);
}
primary = of_property_read_bool(np, "st,can-primary");
if (of_property_read_bool(np, "st,can-primary"))
cfg = BXCAN_CFG_DUAL_PRIMARY;
else if (of_property_read_bool(np, "st,can-secondary"))
cfg = BXCAN_CFG_DUAL_SECONDARY;
else
cfg = BXCAN_CFG_SINGLE;
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock\n");
@ -992,7 +1004,7 @@ static int bxcan_probe(struct platform_device *pdev)
priv->clk = clk;
priv->tx_irq = tx_irq;
priv->sce_irq = sce_irq;
priv->primary = primary;
priv->cfg = cfg;
priv->can.clock.freq = clk_get_rate(clk);
spin_lock_init(&priv->rmw_lock);
priv->tx_head = 0;
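As a rough standalone sketch (illustrative C, not driver code) of what the three-way configuration buys: a single-instance controller and the dual-configuration primary both own filter bank 0, while only the dual-configuration secondary starts at bank 14, mirroring the reworked BXCAN_FILTER_ID() above.

#include <stdio.h>

/* The split at bank 14 matches the controller's default CAN2SB
 * setting on dual-CAN parts; values here only illustrate the macro. */
enum bxcan_cfg { BXCAN_CFG_SINGLE, BXCAN_CFG_DUAL_PRIMARY, BXCAN_CFG_DUAL_SECONDARY };

static unsigned int filter_bank(enum bxcan_cfg cfg)
{
	return cfg == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0;
}

int main(void)
{
	printf("single=%u primary=%u secondary=%u\n",
	       filter_bank(BXCAN_CFG_SINGLE),
	       filter_bank(BXCAN_CFG_DUAL_PRIMARY),
	       filter_bank(BXCAN_CFG_DUAL_SECONDARY));
	return 0;	/* prints: single=0 primary=0 secondary=14 */
}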


@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* check flag whether this packet has to be looped back */
if (!(dev->flags & IFF_ECHO) ||
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD))) {
skb->protocol != htons(ETH_P_CANFD) &&
skb->protocol != htons(ETH_P_CANXL))) {
kfree_skb(skb);
return 0;
}


@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
KVASER_PCIEFD_KCAN_IRQ_TAR;
iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
else
mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
del_timer(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
close_candev(netdev);
return ret;
@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
SET_NETDEV_DEV(netdev, &pcie->pci->dev);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
KVASER_PCIEFD_KCAN_IRQ_TFD,
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
pcie->can[i] = can;
@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
u32 srb_status;
u32 srb_packet_count;
dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
/* Disable the DMA */
@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
/* Empty Rx FIFO */
srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
while (srb_packet_count) {
/* Drop current packet in FIFO */
ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
srb_packet_count--;
}
srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
cmd = KVASER_PCIEFD_KCAN_CMD_AT;
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
netdev_err(can->can.dev, "Tx FIFO overflow\n");
if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
if (count == 0)
iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
}
if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
netdev_err(can->can.dev,
"Fail to change bittiming, when not in reset mode\n");
@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
if (err)
goto err_teardown_can_ctrls;
err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
if (err)
goto err_teardown_can_ctrls;
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
if (err)
goto err_teardown_can_ctrls;
err = kvaser_pciefd_reg_candev(pcie);
if (err)
goto err_free_irq;
@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
/* Disable PCI interrupts */
iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
free_irq(pcie->pci->irq, pcie);
err_teardown_can_ctrls:


@ -276,7 +276,7 @@
/* Offset 0x10: Extended Port Control Command */
#define MV88E6393X_PORT_EPC_CMD 0x10
#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300
#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000
#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
/* Offset 0x11: Extended Port Control Data */


@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}
static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
{
u32 mask = A5PSW_PORT_ENA_TX(port);
u32 reg = enable ? mask : 0;
/* Even though the port TX is disabled through TXENA bit in the
* PORT_ENA register, it can still send BPDUs. This depends on the tag
* configuration added when sending packets from the CPU port to the
* switch port. Indeed, when using forced forwarding without filtering,
* even disabled ports will be able to send packets that are tagged.
* This allows to implement STP support when ports are in a state where
* forwarding traffic should be stopped but BPDUs should still be sent.
*/
a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
}
static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
{
u32 port_ena = 0;
@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
{
u32 mask = A5PSW_INPUT_LEARN_DIS(port);
u32 reg = !learn ? mask : 0;
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}
static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
{
u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
u32 reg = block ? mask : 0;
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
}
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
bool standalone)
{
a5psw_port_learning_set(a5psw, port, !standalone);
a5psw_flooding_set_resolution(a5psw, port, !standalone);
a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}
static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
}
a5psw->br_dev = bridge.dev;
a5psw_flooding_set_resolution(a5psw, port, true);
a5psw_port_mgmtfwd_set(a5psw, port, false);
a5psw_port_set_standalone(a5psw, port, false);
return 0;
}
@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
{
struct a5psw *a5psw = ds->priv;
a5psw_flooding_set_resolution(a5psw, port, false);
a5psw_port_mgmtfwd_set(a5psw, port, true);
a5psw_port_set_standalone(a5psw, port, true);
/* No more ports bridged */
if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
bool learning_enabled, rx_enabled, tx_enabled;
struct a5psw *a5psw = ds->priv;
u32 reg = 0;
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
reg |= A5PSW_INPUT_LEARN_DIS(port);
reg |= A5PSW_INPUT_LEARN_BLOCK(port);
break;
case BR_STATE_LISTENING:
reg |= A5PSW_INPUT_LEARN_DIS(port);
rx_enabled = false;
tx_enabled = false;
learning_enabled = false;
break;
case BR_STATE_LEARNING:
reg |= A5PSW_INPUT_LEARN_BLOCK(port);
rx_enabled = false;
tx_enabled = false;
learning_enabled = true;
break;
case BR_STATE_FORWARDING:
default:
rx_enabled = true;
tx_enabled = true;
learning_enabled = true;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
return;
}
a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
a5psw_port_learning_set(a5psw, port, learning_enabled);
a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
a5psw_port_tx_enable(a5psw, port, tx_enabled);
}
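A small userspace analog (hypothetical names, not driver code) of the mapping the rewritten switch implements, assuming DISABLED and BLOCKING resolve to the same all-off knobs as LISTENING:

#include <stdbool.h>
#include <stdio.h>

enum stp_state { STP_DISABLED, STP_BLOCKING, STP_LISTENING,
		 STP_LEARNING, STP_FORWARDING };

/* Learning turns on only from LEARNING onward; RX and TX only when
 * FORWARDING.  As the a5psw_port_tx_enable() comment notes, BPDUs
 * can still egress on a TX-disabled port via the CPU tag path. */
static void stp_knobs(enum stp_state s, bool *learn, bool *rx, bool *tx)
{
	*learn = (s == STP_LEARNING || s == STP_FORWARDING);
	*rx = (s == STP_FORWARDING);
	*tx = (s == STP_FORWARDING);
}

int main(void)
{
	bool learn, rx, tx;

	stp_knobs(STP_LEARNING, &learn, &rx, &tx);
	printf("LEARNING: learn=%d rx=%d tx=%d\n", learn, rx, tx);
	return 0;	/* prints: LEARNING: learn=1 rx=0 tx=0 */
}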
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
}
/* Configure management port */
reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
/* Set pattern 0 to forward all frame to mgmt port */
@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
if (dsa_port_is_unused(dp))
continue;
/* Enable egress flooding for CPU port */
if (dsa_port_is_cpu(dp))
/* Enable egress flooding and learning for CPU port */
if (dsa_port_is_cpu(dp)) {
a5psw_flooding_set_resolution(a5psw, port, true);
a5psw_port_learning_set(a5psw, port, true);
}
/* Enable management forward only for user ports */
/* Enable standalone mode for user ports */
if (dsa_port_is_user(dp))
a5psw_port_mgmtfwd_set(a5psw, port, true);
a5psw_port_set_standalone(a5psw, port, true);
}
return 0;


@ -19,6 +19,7 @@
#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
#define A5PSW_PORT_ENA 0x8
#define A5PSW_PORT_ENA_TX(port) BIT(port)
#define A5PSW_PORT_ENA_RX_SHIFT 16
#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
BIT(port))
@ -36,7 +37,7 @@
#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
#define A5PSW_MGMT_CFG 0x20
#define A5PSW_MGMT_CFG_DISCARD BIT(7)
#define A5PSW_MGMT_CFG_ENABLE BIT(6)
#define A5PSW_MODE_CFG 0x24
#define A5PSW_MODE_STATS_RESET BIT(31)


@ -66,8 +66,10 @@ static int max_interrupt_work = 20;
#include <linux/timer.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>


@ -52,6 +52,7 @@ static const char version2[] =
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <net/Space.h>
#include <asm/io.h>


@ -66,6 +66,7 @@ static const char version[] =
#include <linux/isapnp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/Space.h>
#include <asm/io.h>
#include <asm/irq.h>


@ -37,6 +37,7 @@ static const char version[] =
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/Space.h>
#include <asm/io.h>


@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>


@ -3450,7 +3450,7 @@ static int bcmgenet_open(struct net_device *dev)
return ret;
}
static void bcmgenet_netif_stop(struct net_device *dev)
static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@ -3465,6 +3465,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
if (stop_phy)
phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@ -3485,7 +3487,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
bcmgenet_netif_stop(dev);
bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@ -4303,7 +4305,7 @@ static int bcmgenet_suspend(struct device *d)
netif_device_detach(dev);
bcmgenet_netif_stop(dev);
bcmgenet_netif_stop(dev, true);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);


@ -72,6 +72,8 @@
#include <linux/gfp.h>
#include <linux/io.h>
#include <net/Space.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA


@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
netdev_err(fep->netdev, "NOT enough BD for SG!\n");
xdp_return_frame(frame);
return NETDEV_TX_BUSY;
}
@ -4478,9 +4477,11 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
return ret;
dev_err(&pdev->dev,
"Failed to resume device in remove callback (%pe)\n",
ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@ -4493,8 +4494,13 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
/* After pm_runtime_get_sync() failed, the clks are still off, so skip
* disabling them again.
*/
if (ret >= 0) {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);


@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
return head == hw->cmq.csq.next_to_use;
}
static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
{HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
};
u32 i;
for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
if (cmdq_tx_timeout_map[i].opcode == opcode)
return cmdq_tx_timeout_map[i].tx_timeout;
return tx_timeout;
}
static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
bool *is_completed)
{
u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
hw->cmq.tx_timeout);
u32 timeout = 0;
do {
@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
}
udelay(1);
timeout++;
} while (timeout < hw->cmq.tx_timeout);
} while (timeout < cmdq_tx_timeout);
}
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
* if multi descriptors to be sent, use the first one to check
*/
if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
hclge_comm_wait_for_resp(hw, &is_completed);
hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
&is_completed);
if (!is_completed)
ret = -EBADE;
@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
/* Setup Tx write back timeout */
cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
/* Setup queue rings */
ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
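The per-opcode lookup above is a plain table-driven override; a compact standalone sketch of the same idea (opcode value hypothetical, timeouts in microseconds as in the driver):

#include <stdio.h>
#include <stdint.h>

struct timeout_map { uint16_t opcode; uint32_t timeout; };

/* Opcodes found in the table get a longer budget; everything else
 * keeps the caller's default, as hclge_get_cmdq_tx_timeout() does
 * with its 500 ms entry for the reset-trigger opcode. */
static uint32_t tx_timeout_for(uint16_t opcode, uint32_t dflt)
{
	static const struct timeout_map map[] = {
		{ 0x1a, 500000 },	/* hypothetical opcode value */
	};
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].opcode == opcode)
			return map[i].timeout;
	return dflt;
}

int main(void)
{
	printf("%u %u\n", tx_timeout_for(0x1a, 30000),
	       tx_timeout_for(0x01, 30000));	/* prints: 500000 30000 */
	return 0;
}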


@ -54,7 +54,8 @@
#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
enum hclge_opcode_type {
/* Generic commands */
@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map {
u16 local_bit;
};
struct hclge_cmdq_tx_timeout_map {
u32 opcode;
u32 tx_timeout;
};
struct hclge_comm_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];


@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
.buf_len = HNS3_DBG_READ_LEN_4MB,
.buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{


@ -10,6 +10,7 @@
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
#define HNS3_DBG_READ_LEN_4MB 0x400000
#define HNS3_DBG_READ_LEN_5MB 0x500000
#define HNS3_DBG_WRITE_LEN 1024
#define HNS3_DBG_DATA_STR_LEN 32


@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET &&
hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
HCLGE_PFC_DISABLE);
if (hdev->reset_type != HNAE3_FUNC_RESET &&
hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
}
}
hclge_reset_tqp(handle);


@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap)
int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap)
{
struct hclge_desc desc;
struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;


@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
#define HCLGE_PFC_DISABLE 0
#define HCLGE_PFC_TX_RX_DISABLE 0
struct hclge_pfc_en_cmd {
u8 tx_rx_en_bitmap;
u8 pri_en_bitmap;
@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);


@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
* might happen in case reset assertion was made by PF. Yes, this also
* means we might end up waiting bit more even for VF reset.
*/
msleep(5000);
if (hdev->reset_type == HNAE3_VF_FULL_RESET)
msleep(5000);
else
msleep(500);
return 0;
}


@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
/* Request VLAN offload settings */
if (VLAN_V2_ALLOWED(adapter))
iavf_set_vlan_offload_features(adapter, 0,
netdev->features);
iavf_set_queue_vlan_tag_loc(adapter);
was_mac_changed = !ether_addr_equal(netdev->dev_addr,


@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
first->tx_flags |= (skb->priority & 0x7) <<
ICE_TX_FLAGS_VLAN_PR_S;
first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/


@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ret = ice_vsi_alloc_ring_stats(vsi);
if (ret)
goto unroll_vector_base;
vsi->stat_offsets_loaded = false;
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value


@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (!vf)
return -EINVAL;
ret = ice_check_vf_ready_for_cfg(vf);
ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto out_put_vf;
}
ret = ice_check_vf_ready_for_cfg(vf);
ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return -EOPNOTSUPP;
}
ret = ice_check_vf_ready_for_cfg(vf);
ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (!vf)
return -EINVAL;
ret = ice_check_vf_ready_for_cfg(vf);
ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;


@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
ICE_TX_FLAGS_VLAN_S;
td_tag = first->vid;
}
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
* VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
first->vid = skb_vlan_tag_get(skb);
if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_IL2TAG2 <<
ICE_TXD_CTX_QW1_CMD_S));
offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
ICE_TX_FLAGS_VLAN_S;
offload.cd_l2tag2 = first->vid;
}
/* set up TSO offload */


@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
@ -182,8 +178,9 @@ struct ice_tx_buf {
unsigned int gso_segs;
unsigned int nr_frags; /* used for mbuf XDP */
};
u32 type:16; /* &ice_tx_buf_type */
u32 tx_flags:16;
u32 tx_flags:12;
u32 type:4; /* &ice_tx_buf_type */
u32 vid:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
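To see why the old layout was "undersized", a small standalone demo (not driver code): tx_flags had become a 16-bit bitfield, yet the 802.1p priority was still shifted to bits 29-31 (ICE_TX_FLAGS_VLAN_PR_S == 29), so the value was silently truncated away; giving the tag its own 16-bit vid field removes the shift entirely.

#include <stdio.h>
#include <stdint.h>

struct demo_buf {
	uint32_t type:16;
	uint32_t tx_flags:16;	/* too narrow for a shift of 29 */
};

int main(void)
{
	struct demo_buf b = { 0 };
	unsigned int prio = 5;

	b.tx_flags |= (prio & 0x7) << 29;	/* bits land above bit 15 */
	printf("stored priority bits: 0x%x\n", b.tx_flags);	/* 0x0 */
	return 0;
}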


@ -185,6 +185,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
return 0;
}
/**
* ice_check_vf_ready_for_reset - check if VF is ready to be reset
* @vf: VF to check if it's ready to be reset
*
* The purpose of this function is to ensure that the VF is not in reset,
* disabled, and is both initialized and active, thus enabling us to safely
* initialize another reset.
*/
int ice_check_vf_ready_for_reset(struct ice_vf *vf)
{
int ret;
ret = ice_check_vf_ready_for_cfg(vf);
if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
ret = -EAGAIN;
return ret;
}
/**
* ice_trigger_vf_reset - Reset a VF on HW
* @vf: pointer to the VF structure


@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
int ice_check_vf_ready_for_reset(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void


@ -3955,6 +3955,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:


@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table


@ -8,7 +8,7 @@
#ifdef CONFIG_DCB
/* DCB feature definitions */
#define NFP_NET_MAX_DSCP 4
#define NFP_NET_MAX_DSCP 64
#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
#define NFP_NET_MAX_PRIO 8
#define NFP_DCB_CFG_STRIDE 256


@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
efx->net_dev = net_dev;
SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
net_dev->features |= efx->type->offload_features;
/* enable all supported features except rx-fcs and rx-all */
net_dev->features |= efx->type->offload_features &
~(NETIF_F_RXFCS | NETIF_F_RXALL);
net_dev->hw_features |= efx->type->offload_features;
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |


@ -5077,6 +5077,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
vfree(cp->fw_data);
pci_iounmap(pdev, cp->regs);


@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
}
static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad,
int reg)
{
u8 buf[4], res[6];
int bus_addr, ret;
@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
return 0xffff;
buf[0] = ROLLBALL_DATA_ADDR;
buf[1] = (reg >> 16) & 0x1f;
buf[1] = devad;
buf[2] = (reg >> 8) & 0xff;
buf[3] = reg & 0xff;
@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
return val;
}
static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
u16 val)
static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad,
int reg, u16 val)
{
int bus_addr, ret;
u8 buf[6];
@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
return 0;
buf[0] = ROLLBALL_DATA_ADDR;
buf[1] = (reg >> 16) & 0x1f;
buf[1] = devad;
buf[2] = (reg >> 8) & 0xff;
buf[3] = reg & 0xff;
buf[4] = val >> 8;
@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
return ERR_PTR(ret);
}
mii->read = i2c_mii_read_rollball;
mii->write = i2c_mii_write_rollball;
mii->read_c45 = i2c_mii_read_rollball;
mii->write_c45 = i2c_mii_write_rollball;
break;
default:
mii->read = i2c_mii_read_default_c22;


@ -873,7 +873,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
switch (compat->an_mode) {
case DW_AN_C73:
if (phylink_autoneg_inband(mode)) {
if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
ret = xpcs_config_aneg_c73(xpcs, compat);
if (ret)
return ret;


@ -44,6 +44,7 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_DSP_FFE_CFG 0x012c
#define DP83867_RXFCFG 0x0134
#define DP83867_RXFPMD1 0x0136
#define DP83867_RXFPMD2 0x0137
@ -941,8 +942,27 @@ static int dp83867_phy_reset(struct phy_device *phydev)
usleep_range(10, 20);
return phy_modify(phydev, MII_DP83867_PHYCTRL,
err = phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
if (err < 0)
return err;
/* Configure the DSP Feedforward Equalizer Configuration register to
* improve short cable (< 1 meter) performance. This will not affect
* long cable performance.
*/
err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG,
0x0e81);
if (err < 0)
return err;
err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
if (err < 0)
return err;
usleep_range(10, 20);
return 0;
}
static void dp83867_link_change_notify(struct phy_device *phydev)


@ -2225,6 +2225,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
ASSERT_RTNL();
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
pl->supported);
if (pl->phydev) {
/* We can rely on phylib for this update; we also do not need
* to update the pl->link_config settings:
@ -2249,10 +2253,6 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
config = pl->link_config;
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
pl->supported);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
switch (kset->base.autoneg) {
case AUTONEG_DISABLE:


@ -1977,6 +1977,14 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int queue_len;
spin_lock_bh(&queue->lock);
if (unlikely(tfile->detached)) {
spin_unlock_bh(&queue->lock);
rcu_read_unlock();
err = -EBUSY;
goto free_skb;
}
__skb_queue_tail(queue, skb);
queue_len = skb_queue_len(queue);
spin_unlock(&queue->lock);
@ -2512,6 +2520,13 @@ static int tun_xdp_one(struct tun_struct *tun,
if (tfile->napi_enabled) {
queue = &tfile->sk.sk_write_queue;
spin_lock(&queue->lock);
if (unlikely(tfile->detached)) {
spin_unlock(&queue->lock);
kfree_skb(skb);
return -EBUSY;
}
__skb_queue_tail(queue, skb);
spin_unlock(&queue->lock);
ret = 1;


@ -1868,6 +1868,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
return received;
}
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
napi_disable(&vi->rq[qp_index].napi);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
{
struct net_device *dev = vi->dev;
int err;
err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
vi->rq[qp_index].napi.napi_id);
if (err < 0)
return err;
err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL);
if (err < 0)
goto err_xdp_reg_mem_model;
virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
return 0;
err_xdp_reg_mem_model:
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
return err;
}
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@ -1881,22 +1913,20 @@ static int virtnet_open(struct net_device *dev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
return err;
err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL);
if (err < 0) {
xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
return err;
}
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
goto err_enable_qp;
}
return 0;
err_enable_qp:
disable_delayed_refill(vi);
cancel_delayed_work_sync(&vi->refill);
for (i--; i >= 0; i--)
virtnet_disable_queue_pair(vi, i);
return err;
}
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@ -2305,11 +2335,8 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_tx_disable(&vi->sq[i].napi);
napi_disable(&vi->rq[i].napi);
xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
}
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_disable_queue_pair(vi, i);
return 0;
}
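The unwind loop at the end of the reworked virtnet_open() is the classic reverse-teardown idiom; a self-contained sketch (hypothetical names, not driver code) of the shape the fix adopts:

#include <stdio.h>

/* Illustrative only: enable_one() fails at index 3, so the unwind
 * path tears down exactly the pairs [0, 3) that were already
 * enabled, in reverse order. */
static int enable_one(int i) { return i == 3 ? -1 : 0; }
static void disable_one(int i) { printf("disable %d\n", i); }

static int enable_all(int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = enable_one(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	for (i--; i >= 0; i--)	/* only what actually succeeded */
		disable_one(i);
	return err;
}

int main(void)
{
	return enable_all(5) ? 1 : 0;	/* prints: disable 2, 1, 0 */
}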


@ -651,7 +651,7 @@ struct b43_iv {
union {
__be16 d16;
__be32 d32;
} data __packed;
} __packed data;
} __packed;


@ -379,7 +379,7 @@ struct b43legacy_iv {
union {
__be16 d16;
__be32 d32;
} data __packed;
} __packed data;
} __packed;
#define B43legacy_PHYMODE(phytype) (1 << (phytype))


@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
if (!id) {
dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device);
return -ENODEV;
}
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);


@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev)
}
#endif
/* Forward declaration for pci_match_id() call */
static const struct pci_device_id brcmf_pcie_devid_table[];
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_core *core;
struct brcmf_bus *bus;
if (!id) {
id = pci_match_id(brcmf_pcie_devid_table, pdev);
if (!id) {
pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device);
return -ENODEV;
}
}
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
ret = -ENOMEM;


@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
brcmf_usb_detach(devinfo);
}
/* Forward declaration for usb_match_id() call */
static const struct usb_device_id brcmf_usb_devid_table[];
static int
brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
u32 num_of_eps;
u8 endpoint_num, ep;
if (!id) {
id = usb_match_id(intf, brcmf_usb_devid_table);
if (!id) {
dev_err(&intf->dev, "Error could not find matching usb_device_id\n");
return -ENODEV;
}
}
brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);


@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
},
{ .ident = "ASUS",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
{}


@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
}
static void *
iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
struct iwl_fw_ini_monitor_dump *data,
const struct iwl_fw_mon_regs *addrs)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
if (!iwl_trans_grab_nic_access(fwrt->trans)) {
IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
&fwrt->trans->cfg->mon_dram_regs);
}
@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
&fwrt->trans->cfg->mon_smem_regs);
}
@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
return iwl_dump_ini_mon_fill_header(fwrt,
/* no offset calculation later */
IWL_FW_INI_ALLOCATION_ID_DBGC1,
mon_dump,
&fwrt->trans->cfg->mon_dbgi_regs);
}


@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return PTR_ERR_OR_ZERO(sta);
}
if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
FTM_PUT_FLAG(PMF);


@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
},
{ .ident = "LENOVO",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
},
},
{ .ident = "DELL",
@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
if (iwl_rfi_supported(mvm)) {
if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
iwl_rfi_send_config_cmd(mvm, NULL);
}


@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (mvmvif->link[i]->phy_ctxt)
count++;
/* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be
* defined per HW
*/
if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM)
return -EINVAL;
if (vif->type == NL80211_IFTYPE_AP) {
if (count > mvm->fw->ucode_capa.num_beacons)
return -EOPNOTSUPP;
/* this should be per HW or such */
} else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) {
return -EOPNOTSUPP;
}
}
/* Catch early if driver tries to activate or deactivate a link


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
 				      struct ieee80211_vif *vif,
 				      struct ieee80211_sta *sta)
 {
-	unsigned int i;
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
 
 	/* Beacon interval check - firmware will crash if the beacon
 	 * interval is less than 16. We can't avoid connecting at all,
@@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
 	 * wpa_s will blocklist the AP...
 	 */
-	for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-			 IEEE80211_MLD_MAX_NUM_LINKS) {
-		struct ieee80211_link_sta *link_sta =
-			link_sta_dereference_protected(sta, i);
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
 		struct ieee80211_bss_conf *link_conf =
-			link_conf_dereference_protected(vif, i);
+			link_conf_dereference_protected(vif, link_id);
 
-		if (!link_conf || !link_sta)
+		if (!link_conf)
 			continue;
 
 		if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) {
@@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw,
 				       bool is_sta)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	unsigned int i;
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
 
-	for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-			 IEEE80211_MLD_MAX_NUM_LINKS) {
-		struct ieee80211_link_sta *link_sta =
-			link_sta_dereference_protected(sta, i);
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
 		struct ieee80211_bss_conf *link_conf =
-			link_conf_dereference_protected(vif, i);
+			link_conf_dereference_protected(vif, link_id);
 
-		if (!link_conf || !link_sta || !mvmvif->link[i])
+		if (!link_conf || !mvmvif->link[link_id])
 			continue;
 
 		link_conf->he_support = link_sta->he_cap.has_he;
 
 		if (is_sta) {
-			mvmvif->link[i]->he_ru_2mhz_block = false;
+			mvmvif->link[link_id]->he_ru_2mhz_block = false;
 			if (link_sta->he_cap.has_he)
-				iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i,
+				iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif,
+								   link_id,
 								   link_conf);
 		}
 	}
@@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
 				   struct iwl_mvm_sta_state_ops *callbacks)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_link_sta *link_sta;
 	unsigned int i;
 	int ret;
@@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
 					     NL80211_TDLS_SETUP);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-		struct ieee80211_link_sta *link_sta;
-
-		link_sta = link_sta_dereference_protected(sta, i);
-		if (!link_sta)
-			continue;
-
+	for_each_sta_active_link(vif, sta, link_sta, i)
 		link_sta->agg.max_rc_amsdu_len = 1;
-	}
+
 	ieee80211_sta_recalc_aggregates(sta);
 
 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	unsigned int i;
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
 	if (!mvm->mld_api_is_used)
 		goto out;
 
-	for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-			 IEEE80211_MLD_MAX_NUM_LINKS) {
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
 		struct ieee80211_bss_conf *link_conf =
-			link_conf_dereference_protected(vif, i);
+			link_conf_dereference_protected(vif, link_id);
 
 		if (WARN_ON(!link_conf))
 			return -EINVAL;
 
-		if (!mvmvif->link[i])
+		if (!mvmvif->link[link_id])
 			continue;
 
 		iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
 		 * from the AP now.
 		 */
 		iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
+
+		/* Also free dup data just in case any assertions below fail */
+		kfree(mvm_sta->dup_data);
 	}
 
 	mutex_lock(&mvm->mutex);

@@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
 				n_active++;
 		}
 
-		if (vif->type == NL80211_IFTYPE_AP &&
-		    n_active > mvm->fw->ucode_capa.num_beacons)
-			return -EOPNOTSUPP;
-		else if (n_active > 1)
-			return -EOPNOTSUPP;
+		if (vif->type == NL80211_IFTYPE_AP) {
+			if (n_active > mvm->fw->ucode_capa.num_beacons)
+				return -EOPNOTSUPP;
+		} else if (n_active > 1) {
+			return -EOPNOTSUPP;
+		}
 	}
 
 	for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {

@@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
 		if (ret)
 			return ret;
-	}
 
-	spin_lock_init(&mvm_sta->lock);
+		spin_lock_init(&mvm_sta->lock);
 
-	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
-		ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
-	else
-		ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
-				       STATION_TYPE_PEER);
+		ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
+				       STATION_TYPE_PEER);
+	} else {
+		ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
+	}
 
 	if (ret)
 		goto err;
@@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_link_sta *link_sta;
 	unsigned int link_id;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	lockdep_assert_held(&mvm->mutex);
 
-	kfree(mvm_sta->dup_data);
-
 	/* flush its queues here since we are freeing mvm_sta */
 	for_each_sta_active_link(vif, sta, link_sta, link_id) {
 		struct iwl_mvm_link_sta *mvm_link_sta =

@@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
 				u32 old_sta_mask,
 				u32 new_sta_mask);
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm);
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
 			    struct iwl_rfi_lut_entry *rfi_table);
 struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);

@@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
 
 		n_channels = __le32_to_cpu(mcc_resp->n_channels);
+		if (iwl_rx_packet_payload_len(pkt) !=
+		    struct_size(mcc_resp, channels, n_channels)) {
+			resp_cp = ERR_PTR(-EINVAL);
+			goto exit;
+		}
 		resp_len = sizeof(struct iwl_mcc_update_resp) +
 			   n_channels * sizeof(__le32);
 		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
@@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
 
 		n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
+		if (iwl_rx_packet_payload_len(pkt) !=
+		    struct_size(mcc_resp_v3, channels, n_channels)) {
+			resp_cp = ERR_PTR(-EINVAL);
+			goto exit;
+		}
 		resp_len = sizeof(struct iwl_mcc_update_resp) +
 			   n_channels * sizeof(__le32);
 		resp_cp = kzalloc(resp_len, GFP_KERNEL);

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 - 2021 Intel Corporation
+ * Copyright (C) 2020 - 2022 Intel Corporation
  */
 
 #include "mvm.h"
@@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
 		PHY_BAND_6, PHY_BAND_6,}},
 };
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm)
+{
+	/* The feature depends on a platform bugfix, so for now
+	 * it's always disabled.
+	 * When the platform support detection is implemented we should
+	 * check FW TLV and platform support instead.
+	 */
+	return false;
+}
+
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
 {
 	int ret;
@@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
 		.len[0] = sizeof(cmd),
 	};
 
-	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+	if (!iwl_rfi_supported(mvm))
 		return -EOPNOTSUPP;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
 		.flags = CMD_WANT_SKB,
 	};
 
-	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+	if (!iwl_rfi_supported(mvm))
 		return ERR_PTR(-EOPNOTSUPP);
 
 	mutex_lock(&mvm->mutex);

@@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
 		return;
 
 	lq_sta = mvm_sta;
+
+	spin_lock(&lq_sta->pers.lock);
 	iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
 				     info->band, &info->control.rates[0]);
 	info->control.rates[0].count = 1;
@@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
 		iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
 					     &txrc->reported_rate);
 	}
+	spin_unlock(&lq_sta->pers.lock);
 }
 
 static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,

@@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
 
 		rcu_read_lock();
 		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+			rcu_read_unlock();
+			goto out;
+		}
+
 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
 		/* SN is set to the last expired frame + 1 */
@@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
 			  entries[index].e.reorder_time +
 			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
 	}
+
+out:
 	spin_unlock(&buf->lock);
 }
@@ -2512,7 +2519,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
 			/* Unblock BCAST / MCAST station */
 			iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
-			cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+			cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
 		}
 	}

@@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
 	 * A-MDPU and hence the timer continues to run. Then, the
 	 * timer expires and sta is NULL.
 	 */
-	if (!sta)
+	if (IS_ERR_OR_NULL(sta))
 		goto unlock;
 
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (iwl_mvm_has_new_rx_api(mvm))
-		kfree(mvm_sta->dup_data);
-
 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 	if (ret)
 		return ret;
@@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
 		u8 sta_id = mvmvif->deflink.ap_sta_id;
 
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 						lockdep_is_held(&mvm->mutex));
+		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+			return NULL;
+
 		return sta->addr;
 	}
@@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
 		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+		if (!addr) {
+			IWL_ERR(mvm, "Failed to find mac address\n");
+			return -EINVAL;
+		}
+
 		/* get phase 1 key from mac80211 */
 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);

@@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 
-	if (WARN_ON_ONCE(!sta || !sta->wme)) {
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
 		rcu_read_unlock();
 		return;
 	}

@@ -173,7 +173,7 @@ enum {
 #define MT_TXS5_MPDU_TX_CNT		GENMASK(31, 23)
 
 #define MT_TXS6_MPDU_FAIL_CNT		GENMASK(31, 23)
-
+#define MT_TXS7_MPDU_RETRY_BYTE		GENMASK(22, 0)
 #define MT_TXS7_MPDU_RETRY_CNT		GENMASK(31, 23)
 
 /* RXD DW0 */

@@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
 	/* PPDU based reporting */
 	if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
 		stats->tx_bytes +=
-			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
+			le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
 		stats->tx_packets +=
 			le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
 		stats->tx_failed +=

@@ -1088,7 +1088,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 		else if (beacon && mvif->beacon_rates_idx)
 			idx = mvif->beacon_rates_idx;
 
-		txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx);
+		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
 		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
 	}
 }

@@ -1803,6 +1803,7 @@ struct rtl8xxxu_priv {
 	u32 rege9c;
 	u32 regeb4;
 	u32 regebc;
+	u32 regrcr;
 	int next_mbox;
 	int nr_out_eps;

@@ -4171,6 +4171,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
 		RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
 		RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
 	rtl8xxxu_write32(priv, REG_RCR, val32);
+	priv->regrcr = val32;
 
 	if (fops->init_reg_rxfltmap) {
 		/* Accept all data frames */
@@ -6501,7 +6502,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
 				      unsigned int *total_flags, u64 multicast)
 {
 	struct rtl8xxxu_priv *priv = hw->priv;
-	u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
+	u32 rcr = priv->regrcr;
 
 	dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
 		__func__, changed_flags, *total_flags);
@@ -6547,6 +6548,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
 	 */
 
 	rtl8xxxu_write32(priv, REG_RCR, rcr);
+	priv->regrcr = rcr;
 
 	*total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
 			 FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |

@@ -918,7 +918,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
 
 	if (changed & IEEE80211_RC_BW_CHANGED)
-		rtw_update_sta_info(rtwdev, si, true);
+		ieee80211_queue_work(rtwdev->hw, &si->rc_work);
 }
 
 const struct ieee80211_ops rtw_ops = {

@@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
 	return mac_id;
 }
 
+static void rtw_sta_rc_work(struct work_struct *work)
+{
+	struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
+					       rc_work);
+	struct rtw_dev *rtwdev = si->rtwdev;
+
+	mutex_lock(&rtwdev->mutex);
+	rtw_update_sta_info(rtwdev, si, true);
+	mutex_unlock(&rtwdev->mutex);
+}
+
 int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 		struct ieee80211_vif *vif)
 {
@@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 	if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
 		return -ENOSPC;
 
+	si->rtwdev = rtwdev;
 	si->sta = sta;
 	si->vif = vif;
 	si->init_ra_lv = 1;
 	ewma_rssi_init(&si->avg_rssi);
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
 		rtw_txq_init(rtwdev, sta->txq[i]);
+	INIT_WORK(&si->rc_work, rtw_sta_rc_work);
 
 	rtw_update_sta_info(rtwdev, si, true);
 	rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
 	int i;
 
+	cancel_work_sync(&si->rc_work);
+
 	rtw_release_macid(rtwdev, si->mac_id);
 	if (fw_exist)
 		rtw_fw_media_status_report(rtwdev, si->mac_id, false);

@@ -743,6 +743,7 @@ struct rtw_txq {
 DECLARE_EWMA(rssi, 10, 16);
 
 struct rtw_sta_info {
+	struct rtw_dev *rtwdev;
 	struct ieee80211_sta *sta;
 	struct ieee80211_vif *vif;
@@ -767,6 +768,8 @@ struct rtw_sta_info {
 	bool use_cfg_mask;
 	struct cfg80211_bitrate_mask *mask;
+
+	struct work_struct rc_work;
 };
 
 enum rtw_bfee_role {

@@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
 	u8 buf[2];
 	int i;
 
-	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) {
-		sdio_writew(rtwsdio->sdio_func, val, addr, err_ret);
-		return;
-	}
-
 	*(__le16 *)buf = cpu_to_le16(val);
 
 	for (i = 0; i < 2; i++) {
@@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
 	u8 buf[2];
 	int i;
 
-	if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2))
-		return sdio_readw(rtwsdio->sdio_func, addr, err_ret);
-
 	for (i = 0; i < 2; i++) {
 		buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
 		if (*err_ret)

@@ -78,7 +78,7 @@ struct rtw_usb {
 	u8 pipe_interrupt;
 	u8 pipe_in;
 	u8 out_ep[RTW_USB_EP_MAX];
-	u8 qsel_to_ep[TX_DESC_QSEL_MAX];
+	int qsel_to_ep[TX_DESC_QSEL_MAX];
 	u8 usb_txagg_num;
 	struct workqueue_struct *txwq, *rxwq;

@@ -1425,6 +1425,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
 	.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
 	/* PCIE 64 */
 	.wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+	/* 8852B PCIE SCC */
+	.wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
 	/* DLFW */
 	.wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
 	/* 8852C DLFW */
@@ -1449,6 +1451,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
 	.wde_qt4 = {0, 0, 0, 0,},
 	/* PCIE 64 */
 	.wde_qt6 = {448, 48, 0, 16,},
+	/* 8852B PCIE SCC */
+	.wde_qt7 = {446, 48, 0, 16,},
 	/* 8852C DLFW */
 	.wde_qt17 = {0, 0, 0, 0,},
 	/* 8852C PCIE SCC */

@@ -792,6 +792,7 @@ struct rtw89_mac_size_set {
 	const struct rtw89_dle_size wde_size0;
 	const struct rtw89_dle_size wde_size4;
 	const struct rtw89_dle_size wde_size6;
+	const struct rtw89_dle_size wde_size7;
 	const struct rtw89_dle_size wde_size9;
 	const struct rtw89_dle_size wde_size18;
 	const struct rtw89_dle_size wde_size19;
@@ -804,6 +805,7 @@ struct rtw89_mac_size_set {
 	const struct rtw89_wde_quota wde_qt0;
 	const struct rtw89_wde_quota wde_qt4;
 	const struct rtw89_wde_quota wde_qt6;
+	const struct rtw89_wde_quota wde_qt7;
 	const struct rtw89_wde_quota wde_qt17;
 	const struct rtw89_wde_quota wde_qt18;
 	const struct rtw89_ple_quota ple_qt4;

@@ -18,25 +18,25 @@
 	RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin"
 
 static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = {
-	{5, 343, grp_0}, /* ACH 0 */
-	{5, 343, grp_0}, /* ACH 1 */
-	{5, 343, grp_0}, /* ACH 2 */
-	{5, 343, grp_0}, /* ACH 3 */
+	{5, 341, grp_0}, /* ACH 0 */
+	{5, 341, grp_0}, /* ACH 1 */
+	{4, 342, grp_0}, /* ACH 2 */
+	{4, 342, grp_0}, /* ACH 3 */
 	{0, 0, grp_0}, /* ACH 4 */
 	{0, 0, grp_0}, /* ACH 5 */
 	{0, 0, grp_0}, /* ACH 6 */
 	{0, 0, grp_0}, /* ACH 7 */
-	{4, 344, grp_0}, /* B0MGQ */
-	{4, 344, grp_0}, /* B0HIQ */
+	{4, 342, grp_0}, /* B0MGQ */
+	{4, 342, grp_0}, /* B0HIQ */
 	{0, 0, grp_0}, /* B1MGQ */
 	{0, 0, grp_0}, /* B1HIQ */
 	{40, 0, 0} /* FWCMDQ */
 };
 
 static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = {
-	448, /* Group 0 */
+	446, /* Group 0 */
 	0, /* Group 1 */
-	448, /* Public Max */
+	446, /* Public Max */
 	0 /* WP threshold */
 };
@@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
 };
 
 static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
-	[RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
-			   &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-			   &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+	[RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7,
+			   &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+			   &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
 			   &rtw89_mac_size.ple_qt58},
-	[RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6,
-			   &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-			   &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+	[RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7,
+			   &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+			   &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
 			   &rtw89_mac_size.ple_qt_52b_wow},
 	[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
 			   &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,

@@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 			ret = -ENOMEM;
 			goto out_free;
 		}
-		param.pmsr_capa = pmsr_capa;
 
 		ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info);
 		if (ret)
 			goto out_free;
+
+		param.pmsr_capa = pmsr_capa;
 	}
 
 	ret = mac80211_hwsim_new_radio(info, &param);

@@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
 	struct ipc_mux_config mux_cfg;
 	struct iosm_imem *ipc_imem;
 	u8 ctrl_chl_idx = 0;
+	int ret;
 
 	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
 
 	if (ipc_imem->phase != IPC_P_RUN) {
 		dev_err(ipc_imem->dev,
 			"Modem link down. Exit run state worker.");
-		return;
+		goto err_out;
 	}
 
 	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
 		ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
-	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
-		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+	ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
+	if (ret < 0)
+		goto err_out;
 
-	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
-	if (ipc_imem->mux)
-		ipc_imem->mux->wwan = ipc_imem->wwan;
+	ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+	if (!ipc_imem->mux)
+		goto err_out;
+
+	ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+	if (ret < 0)
+		goto err_ipc_mux_deinit;
+
+	ipc_imem->mux->wwan = ipc_imem->wwan;
 
 	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
 		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
@@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
 	/* Complete all memory stores after setting bit */
 	smp_mb__after_atomic();
+
 	return;
+
+err_ipc_mux_deinit:
+	ipc_mux_deinit(ipc_imem->mux);
+err_out:
+	ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 }
 
 static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)

@@ -77,8 +77,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
 }
 
 /* Initialize wwan channel */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-				enum ipc_mux_protocol mux_type)
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+			       enum ipc_mux_protocol mux_type)
 {
 	struct ipc_chnl_cfg chnl_cfg = { 0 };
@@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
 	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
 	if (ipc_imem->cp_version == -1) {
 		dev_err(ipc_imem->dev, "invalid CP version");
-		return;
+		return -EIO;
 	}
 
 	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
@@ -104,9 +104,13 @@ int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
 
 	/* WWAN registration. */
 	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
-	if (!ipc_imem->wwan)
+	if (!ipc_imem->wwan) {
 		dev_err(ipc_imem->dev,
 			"failed to register the ipc_wwan interfaces");
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /* Map SKB to DMA for transfer */

@@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
  * MUX.
  * @ipc_imem:		Pointer to iosm_imem struct.
  * @mux_type:		Type of mux protocol.
+ *
+ * Return: 0 on success and failure value on error
  */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-				enum ipc_mux_protocol mux_type);
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+			       enum ipc_mux_protocol mux_type);
 
 /**
  * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP

@@ -45,6 +45,7 @@
 #define T7XX_PCI_IREG_BASE		0
 #define T7XX_PCI_EREG_BASE		2
 
+#define T7XX_INIT_TIMEOUT		20
 #define PM_SLEEP_DIS_TIMEOUT_MS		20
 #define PM_ACK_TIMEOUT_MS		1500
 #define PM_AUTOSUSPEND_MS		20000
@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
 	spin_lock_init(&t7xx_dev->md_pm_lock);
 	init_completion(&t7xx_dev->sleep_lock_acquire);
 	init_completion(&t7xx_dev->pm_sr_ack);
+	init_completion(&t7xx_dev->init_done);
 	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
 
 	device_init_wakeup(&pdev->dev, true);
@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
 	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
 	pm_runtime_allow(&t7xx_dev->pdev->dev);
 	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+	complete_all(&t7xx_dev->init_done);
 }
 
 static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev)
 	__t7xx_pci_pm_suspend(pdev);
 }
 
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct t7xx_pci_dev *t7xx_dev;
+
+	t7xx_dev = pci_get_drvdata(pdev);
+	if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+		dev_warn(dev, "Not ready for system sleep.\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
 static int t7xx_pci_pm_suspend(struct device *dev)
 {
 	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops t7xx_pci_pm_ops = {
+	.prepare = t7xx_pci_pm_prepare,
 	.suspend = t7xx_pci_pm_suspend,
 	.resume = t7xx_pci_pm_resume,
 	.resume_noirq = t7xx_pci_pm_resume_noirq,

@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
 	struct t7xx_modem	*md;
 	struct t7xx_ccmni_ctrl	*ccmni_ctlb;
 	bool			rgu_pci_irq_en;
+	struct completion	init_done;
 
 	/* Low Power Items */
 	struct list_head	md_pm_entities;

@@ -1900,10 +1900,8 @@ void phy_package_leave(struct phy_device *phydev);
 int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
 			  int addr, size_t priv_size);
 
-#if IS_ENABLED(CONFIG_PHYLIB)
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
-#endif
 
 int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
 int phy_ethtool_get_sset_count(struct phy_device *phydev);

@@ -497,29 +497,6 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 	return NULL;
 }
 
-/* Variant of nexthop_fib6_nh().
- * Caller should either hold rcu_read_lock(), or RTNL.
- */
-static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
-{
-	struct nh_info *nhi;
-
-	if (nh->is_group) {
-		struct nh_group *nh_grp;
-
-		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-		nh = nexthop_mpath_select(nh_grp, 0);
-		if (!nh)
-			return NULL;
-	}
-
-	nhi = rcu_dereference_rtnl(nh->nh_info);
-	if (nhi->family == AF_INET6)
-		return &nhi->fib6_nh;
-
-	return NULL;
-}
-
 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
 {
 	struct fib6_nh *fib6_nh;

@@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
 	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
 	 */
-	if (veth->h_vlan_proto != vlan->vlan_proto ||
-	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
+	if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
+	    veth->h_vlan_proto != vlan->vlan_proto) {
 		u16 vlan_tci;
 		vlan_tci = vlan->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);

@@ -400,6 +400,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
 	return error;
 }
 
+#ifdef CONFIG_PROC_FS
 void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	mutex_lock(&atm_dev_mutex);
@@ -415,3 +416,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	return seq_list_next(v, &atm_devs, pos);
 }
+#endif

@@ -27,6 +27,10 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
 int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
 int br_fill_vlan_tunnel_info(struct sk_buff *skb,
 			     struct net_bridge_vlan_group *vg);
+bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+			const struct net_bridge_vlan *v_last);
+int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+			u16 vid, u32 tun_id, bool *changed);
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 /* br_vlan_tunnel.c */
@@ -43,10 +47,6 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
 			      struct net_bridge_vlan_group *vg);
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
 				 struct net_bridge_vlan *vlan);
-bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
-			const struct net_bridge_vlan *v_last);
-int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
-			u16 vid, u32 tun_id, bool *changed);
 #else
 static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
 {

@@ -1139,7 +1139,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	struct isotp_sock *so = isotp_sk(sk);
 	int ret = 0;
 
-	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
+	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
 		return -EINVAL;
 
 	if (!so->bound)

@@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct j1939_sk_buff_cb *skcb;
 	int ret = 0;
 
-	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
+	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
 		return -EINVAL;
 
 	if (flags & MSG_ERRQUEUE)

@@ -204,11 +204,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 	if (ret < 0)
 		goto err_xa_alloc;
 
-	devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
-	ret = register_netdevice_notifier(&devlink->netdevice_nb);
-	if (ret)
-		goto err_register_netdevice_notifier;
-
 	devlink->dev = dev;
 	devlink->ops = ops;
 	xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
@@ -233,8 +228,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 
 	return devlink;
 
-err_register_netdevice_notifier:
-	xa_erase(&devlinks, devlink->index);
 err_xa_alloc:
 	kfree(devlink);
 	return NULL;
@@ -266,8 +259,6 @@ void devlink_free(struct devlink *devlink)
 	xa_destroy(&devlink->params);
 	xa_destroy(&devlink->ports);
 
-	WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
-
 	xa_erase(&devlinks, devlink->index);
 
 	devlink_put(devlink);
@@ -303,6 +294,10 @@ static struct pernet_operations devlink_pernet_ops __net_initdata = {
 	.pre_exit = devlink_pernet_pre_exit,
 };
 
+static struct notifier_block devlink_port_netdevice_nb = {
+	.notifier_call = devlink_port_netdevice_event,
+};
+
 static int __init devlink_init(void)
 {
 	int err;
@@ -311,6 +306,9 @@ static int __init devlink_init(void)
 	if (err)
 		goto out;
 	err = register_pernet_subsys(&devlink_pernet_ops);
+	if (err)
+		goto out;
+	err = register_netdevice_notifier(&devlink_port_netdevice_nb);
 
 out:
 	WARN_ON(err);

@@ -50,7 +50,6 @@ struct devlink {
 	u8 reload_failed:1;
 	refcount_t refcount;
 	struct rcu_work rwork;
-	struct notifier_block netdevice_nb;
 	char priv[] __aligned(NETDEV_ALIGN);
 };

@@ -7073,10 +7073,9 @@ int devlink_port_netdevice_event(struct notifier_block *nb,
 	struct devlink_port *devlink_port = netdev->devlink_port;
 	struct devlink *devlink;
 
-	devlink = container_of(nb, struct devlink, netdevice_nb);
-
-	if (!devlink_port || devlink_port->devlink != devlink)
+	if (!devlink_port)
 		return NOTIFY_OK;
+	devlink = devlink_port->devlink;
 
 	switch (event) {
 	case NETDEV_POST_INIT:

@@ -829,6 +829,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 			   inet_twsk(sk)->tw_priority : sk->sk_priority;
 		transmit_time = tcp_transmit_time(sk);
 		xfrm_sk_clone_policy(ctl_sk, sk);
+	} else {
+		ctl_sk->sk_mark = 0;
+		ctl_sk->sk_priority = 0;
 	}
 	ip_send_unicast_reply(ctl_sk,
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -836,7 +839,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 			      &arg, arg.iov[0].iov_len,
 			      transmit_time);
 
-	ctl_sk->sk_mark = 0;
 	xfrm_sk_free_policy(ctl_sk);
 	sock_net_set(ctl_sk, &init_net);
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
@@ -935,7 +937,6 @@ static void tcp_v4_send_ack(const struct sock *sk,
 			      &arg, arg.iov[0].iov_len,
 			      transmit_time);
 
-	ctl_sk->sk_mark = 0;
 	sock_net_set(ctl_sk, &init_net);
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 	local_bh_enable();

@@ -2491,7 +2491,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
 	const struct net_device *dev;
 
 	if (rt->nh)
-		fib6_nh = nexthop_fib6_nh_bh(rt->nh);
+		fib6_nh = nexthop_fib6_nh(rt->nh);
 
 	seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
@@ -2556,14 +2556,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
 	if (tbl) {
 		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
-		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+		node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
 	} else {
 		h = 0;
 		node = NULL;
 	}
 
 	while (!node && h < FIB6_TABLE_HASHSZ) {
-		node = rcu_dereference_bh(
+		node = rcu_dereference(
 			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
 	}
 	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
@@ -2593,7 +2593,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (!v)
 		goto iter_table;
 
-	n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+	n = rcu_dereference(((struct fib6_info *)v)->fib6_next);
 	if (n)
 		return n;
@@ -2619,12 +2619,12 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(RCU_BH)
+	__acquires(RCU)
 {
 	struct net *net = seq_file_net(seq);
 	struct ipv6_route_iter *iter = seq->private;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	iter->tbl = ipv6_route_seq_next_table(NULL, net);
 	iter->skip = *pos;
@@ -2645,7 +2645,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
 }
 
 static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
-	__releases(RCU_BH)
+	__releases(RCU)
 {
 	struct net *net = seq_file_net(seq);
 	struct ipv6_route_iter *iter = seq->private;
@@ -2653,7 +2653,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
 	if (ipv6_route_iter_active(iter))
 		fib6_walker_unlink(net, &iter->w);
 
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 }
 
 #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)

@@ -1015,12 +1015,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 					    ntohl(tun_id),
 					    ntohl(md->u.index), truncate,
 					    false);
+			proto = htons(ETH_P_ERSPAN);
 		} else if (md->version == 2) {
 			erspan_build_header_v2(skb,
 					       ntohl(tun_id),
 					       md->u.md2.dir,
 					       get_hwid(&md->u.md2),
 					       truncate, false);
+			proto = htons(ETH_P_ERSPAN2);
 		} else {
 			goto tx_err;
 		}
@@ -1043,24 +1045,25 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 			break;
 		}
 
-		if (t->parms.erspan_ver == 1)
+		if (t->parms.erspan_ver == 1) {
 			erspan_build_header(skb, ntohl(t->parms.o_key),
 					    t->parms.index,
 					    truncate, false);
-		else if (t->parms.erspan_ver == 2)
+			proto = htons(ETH_P_ERSPAN);
+		} else if (t->parms.erspan_ver == 2) {
 			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
 					       t->parms.dir,
 					       t->parms.hwid,
 					       truncate, false);
-		else
+			proto = htons(ETH_P_ERSPAN2);
+		} else {
 			goto tx_err;
+		}
 
 		fl6.daddr = t->parms.raddr;
 	}
 
 	/* Push GRE header. */
-	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
-					   : htons(ETH_P_ERSPAN2);
 	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
 
 	/* TooBig packet may have updated dst->dev's mtu */
