mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge tag 'net-6.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can and netfilter.

  Current release - regressions:
   - rtnetlink: fix double call of rtnl_link_get_net_ifla()
   - tcp: populate XPS related fields of timewait sockets
   - ethtool: fix access to uninitialized fields in set RXNFC command
   - selinux: use sk_to_full_sk() in selinux_ip_output()

  Current release - new code bugs:
   - net: make napi_hash_lock irq safe
   - eth:
      - bnxt_en: support header page pool in queue API
      - ice: fix NULL pointer dereference in switchdev

  Previous releases - regressions:
   - core: fix icmp host relookup triggering ip_rt_bug
   - ipv6:
      - avoid possible NULL deref in modify_prefix_route()
      - release expired exception dst cached in socket
   - smc: fix LGR and link use-after-free issue
   - hsr: avoid potential out-of-bound access in fill_frame_info()
   - can: hi311x: fix potential use-after-free
   - eth: ice: fix VLAN pruning in switchdev mode

  Previous releases - always broken:
   - netfilter:
      - ipset: hold module reference while requesting a module
      - nft_inner: incorrect percpu area handling under softirq
   - can: j1939: fix skb reference counting
   - eth:
      - mlxsw: use correct key block on Spectrum-4
      - mlx5: fix memory leak in mlx5hws_definer_calc_layout"

* tag 'net-6.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (76 commits)
  net :mana :Request a V2 response version for MANA_QUERY_GF_STAT
  net: avoid potential UAF in default_operstate()
  vsock/test: verify socket options after setting them
  vsock/test: fix parameter types in SO_VM_SOCKETS_* calls
  vsock/test: fix failures due to wrong SO_RCVLOWAT parameter
  net/mlx5e: Remove workaround to avoid syndrome for internal port
  net/mlx5e: SD, Use correct mdev to build channel param
  net/mlx5: E-Switch, Fix switching to switchdev mode in MPV
  net/mlx5: E-Switch, Fix switching to switchdev mode with IB device disabled
  net/mlx5: HWS: Properly set bwc queue locks lock classes
  net/mlx5: HWS: Fix memory leak in mlx5hws_definer_calc_layout
  bnxt_en: handle tpa_info in queue API implementation
  bnxt_en: refactor bnxt_alloc_rx_rings() to call bnxt_alloc_rx_agg_bmap()
  bnxt_en: refactor tpa_info alloc/free into helpers
  geneve: do not assume mac header is set in geneve_xmit_skb()
  mlxsw: spectrum_acl_flex_keys: Use correct key block on Spectrum-4
  ethtool: Fix wrong mod state in case of verbose and no_mask bitset
  ipmr: tune the ipmr_can_free_table() checks.
  netfilter: nft_set_hash: skip duplicated elements pending gc run
  netfilter: ipset: Hold module reference while requesting a module
  ...
commit 896d8946da
@@ -6,16 +6,17 @@ Bare UDP Tunnelling Module Documentation

There are various L3 encapsulation standards using UDP being discussed to
leverage the UDP based load balancing capability of different networks.
MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them.

The Bareudp tunnel module provides a generic L3 encapsulation support for
tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.

Special Handling
----------------

The bareudp device supports special handling for MPLS & IP as they can have
multiple ethertypes.
MPLS procotcol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
The MPLS protocol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
IP protocol can have ethertypes ETH_P_IP (v4) & ETH_P_IPV6 (v6).
This special handling can be enabled only for ethertypes ETH_P_IP & ETH_P_MPLS_UC
with a flag called multiproto mode.

@@ -52,7 +53,7 @@ be enabled explicitly with the "multiproto" flag.

3) Device Usage

The bareudp device could be used along with OVS or flower filter in TC.
The OVS or TC flower layer must set the tunnel information in SKB dst field before
sending packet buffer to the bareudp device for transmission. On reception the
bareudp device extracts and stores the tunnel information in SKB dst field before
The OVS or TC flower layer must set the tunnel information in the SKB dst field before
sending the packet buffer to the bareudp device for transmission. On reception, the
bareUDP device extracts and stores the tunnel information in the SKB dst field before
passing the packet buffer to the network stack.
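The transmit contract described in the documentation hunk above can be made concrete with a short sketch. This is an illustrative fragment, not the actual bareudp code; the function name example_tunnel_xmit is hypothetical, though skb_tunnel_info() is the real metadata-dst accessor. A collect-metadata tunnel device checks for the tunnel info that OVS or TC flower attached to the skb's dst before encapsulating:

#include <linux/netdevice.h>
#include <net/dst_metadata.h>

/* Sketch: how a metadata-based tunnel device validates the tunnel
 * information set by OVS/TC flower before transmission.
 */
static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);

	/* No metadata dst, or not a transmit entry: nothing to
	 * encapsulate, so count the drop and free the skb.
	 */
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* A real driver would now build the UDP encapsulation from
	 * info->key (remote address, destination port, TOS, ...).
	 */
	return NETDEV_TX_OK;
}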
@@ -16267,6 +16267,7 @@ F: Documentation/devicetree/bindings/net/
F: Documentation/networking/net_cachelines/net_device.rst
F: drivers/connector/
F: drivers/net/
F: drivers/ptp/
F: include/dt-bindings/net/
F: include/linux/cn_proc.h
F: include/linux/etherdevice.h
@@ -1014,49 +1014,57 @@ static int c_can_handle_bus_err(struct net_device *dev,

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	if (likely(skb))
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		stats->rx_errors++;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_FORM;
		stats->rx_errors++;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		stats->tx_errors++;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT1;
		stats->tx_errors++;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT0;
		stats->tx_errors++;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		stats->rx_errors++;
		break;
	default:
		break;
	}

	if (unlikely(!skb))
		return 0;

	netif_receive_skb(skb);
	return 1;
}
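The same two-step pattern recurs throughout the CAN fixes in this pull: statistics are bumped unconditionally, and only the error-frame fields are guarded by the skb check, so a failed alloc_can_err_skb() no longer skips accounting. A minimal sketch of the pattern, condensed from the hunks above rather than copied from any one driver (example_handle_bus_err is an illustrative name):

#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* Sketch: update statistics even when the error skb cannot be
 * allocated; only touch the CAN error frame if cf is valid.
 */
static int example_handle_bus_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* May fail under memory pressure; cf is then left NULL. */
	skb = alloc_can_err_skb(dev, &cf);

	if (likely(skb))
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Statistics must be counted regardless of the allocation. */
	stats->rx_errors++;

	if (unlikely(!skb))
		return 0;

	netif_receive_skb(skb);
	return 1;
}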
@@ -468,7 +468,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
	else
		set = 0;

	gpiod_set_value(priv->termination_gpio, set);
	gpiod_set_value_cansleep(priv->termination_gpio, set);

	return 0;
}
@@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
		return 0;

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* Propagate the error condition to the CAN stack. */
	skb = alloc_can_err_skb(ndev, &cf);
	if (unlikely(!skb))
		return 0;

	/* Read the error counter register and check for new errors. */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	if (likely(skb))
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
		cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
	if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
		stats->rx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
	if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
		stats->tx_errors++;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
		cf->data[2] |= CAN_ERR_PROT_BIT0;
	if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
		stats->tx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT0;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
		cf->data[2] |= CAN_ERR_PROT_BIT1;
	if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
		stats->tx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT1;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
		stats->rx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
	if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
		stats->rx_errors++;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
	}

	if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
		cf->data[2] |= CAN_ERR_PROT_FORM;
	if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
		stats->rx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* Reset the error counter, ack the IRQ and re-enable the counter. */
	writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);

@@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
	       priv->base + IFI_CANFD_INTERRUPT);
	writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);

	if (unlikely(!skb))
		return 0;

	netif_receive_skb(skb);

	return 1;
@@ -695,47 +695,60 @@ static int m_can_handle_lec_err(struct net_device *dev,
	u32 timestamp = 0;

	cdev->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	if (likely(skb))
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		stats->rx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		stats->rx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		stats->tx_errors++;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		stats->tx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		stats->tx_errors++;
		if (likely(skb))
			cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		stats->rx_errors++;
		if (likely(skb))
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	if (unlikely(!skb))
		return 0;

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);
@@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
	int ret = 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (skb == NULL)
		return -ENOMEM;

	txerr = priv->read_reg(priv, SJA1000_TXERR);
	rxerr = priv->read_reg(priv, SJA1000_RXERR);

@@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
	if (isrc & IRQ_DOI) {
		/* data overrun interrupt */
		netdev_dbg(dev, "data overrun interrupt\n");
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		}

		stats->rx_over_errors++;
		stats->rx_errors++;
		sja1000_write_cmdreg(priv, CMD_CDO);	/* clear bit */

@@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
		else
			state = CAN_STATE_ERROR_ACTIVE;
	}
	if (state != CAN_STATE_BUS_OFF) {
	if (state != CAN_STATE_BUS_OFF && skb) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;

@@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
	if (isrc & IRQ_BEI) {
		/* bus error interrupt */
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;

		ecc = priv->read_reg(priv, SJA1000_ECC);
		if (skb) {
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
			/* set error type */
			switch (ecc & ECC_MASK) {
			case ECC_BIT:
				cf->data[2] |= CAN_ERR_PROT_BIT;
				break;
			case ECC_FORM:
				cf->data[2] |= CAN_ERR_PROT_FORM;
				break;
			case ECC_STUFF:
				cf->data[2] |= CAN_ERR_PROT_STUFF;
				break;
			default:
				break;
			}

		/* set error type */
		switch (ecc & ECC_MASK) {
		case ECC_BIT:
			cf->data[2] |= CAN_ERR_PROT_BIT;
			break;
		case ECC_FORM:
			cf->data[2] |= CAN_ERR_PROT_FORM;
			break;
		case ECC_STUFF:
			cf->data[2] |= CAN_ERR_PROT_STUFF;
			break;
		default:
			break;
			/* set error location */
			cf->data[3] = ecc & ECC_SEG;
		}

		/* set error location */
		cf->data[3] = ecc & ECC_SEG;

		/* Error occurred during transmission? */
		if ((ecc & ECC_DIR) == 0)
			cf->data[2] |= CAN_ERR_PROT_TX;
		if ((ecc & ECC_DIR) == 0) {
			stats->tx_errors++;
			if (skb)
				cf->data[2] |= CAN_ERR_PROT_TX;
		} else {
			stats->rx_errors++;
		}
	}
	if (isrc & IRQ_EPI) {
		/* error passive interrupt */

@@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
		netdev_dbg(dev, "arbitration lost interrupt\n");
		alc = priv->read_reg(priv, SJA1000_ALC);
		priv->can.can_stats.arbitration_lost++;
		cf->can_id |= CAN_ERR_LOSTARB;
		cf->data[0] = alc & 0x1f;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = alc & 0x1f;
		}
	}

	if (state != priv->can.state) {

@@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
		can_bus_off(dev);
	}

	if (!skb)
		return -ENOMEM;

	netif_rx(skb);

	return ret;
@@ -663,27 +663,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
			u8 rxerr, txerr;

			skb = alloc_can_err_skb(net, &cf);
			if (!skb)
				break;

			txerr = hi3110_read(spi, HI3110_READ_TEC);
			rxerr = hi3110_read(spi, HI3110_READ_REC);
			tx_state = txerr >= rxerr ? new_state : 0;
			rx_state = txerr <= rxerr ? new_state : 0;
			can_change_state(net, cf, tx_state, rx_state);
			netif_rx(skb);

			if (new_state == CAN_STATE_BUS_OFF) {
				if (skb)
					netif_rx(skb);
				can_bus_off(net);
				if (priv->can.restart_ms == 0) {
					priv->force_quit = 1;
					hi3110_hw_sleep(spi);
					break;
				}
			} else {
			} else if (skb) {
				cf->can_id |= CAN_ERR_CNT;
				cf->data[6] = txerr;
				cf->data[7] = rxerr;
				netif_rx(skb);
			}
		}

@@ -696,27 +696,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
		/* Check for protocol errors */
		if (eflag & HI3110_ERR_PROTOCOL_MASK) {
			skb = alloc_can_err_skb(net, &cf);
			if (!skb)
				break;
			if (skb)
				cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
			priv->can.can_stats.bus_error++;
			priv->net->stats.rx_errors++;
			if (eflag & HI3110_ERR_BITERR)
				cf->data[2] |= CAN_ERR_PROT_BIT;
			else if (eflag & HI3110_ERR_FRMERR)
				cf->data[2] |= CAN_ERR_PROT_FORM;
			else if (eflag & HI3110_ERR_STUFERR)
				cf->data[2] |= CAN_ERR_PROT_STUFF;
			else if (eflag & HI3110_ERR_CRCERR)
				cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
			else if (eflag & HI3110_ERR_ACKERR)
				cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
			if (eflag & HI3110_ERR_BITERR) {
				priv->net->stats.tx_errors++;
				if (skb)
					cf->data[2] |= CAN_ERR_PROT_BIT;
			} else if (eflag & HI3110_ERR_FRMERR) {
				priv->net->stats.rx_errors++;
				if (skb)
					cf->data[2] |= CAN_ERR_PROT_FORM;
			} else if (eflag & HI3110_ERR_STUFERR) {
				priv->net->stats.rx_errors++;
				if (skb)
					cf->data[2] |= CAN_ERR_PROT_STUFF;
			} else if (eflag & HI3110_ERR_CRCERR) {
				priv->net->stats.rx_errors++;
				if (skb)
					cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
			} else if (eflag & HI3110_ERR_ACKERR) {
				priv->net->stats.tx_errors++;
				if (skb)
					cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
			}

			cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
			cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
			netdev_dbg(priv->net, "Bus Error\n");
			netif_rx(skb);
			if (skb) {
				cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
				cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
				netif_rx(skb);
			}
		}
	}
@@ -21,6 +21,11 @@ static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}

static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
{
	return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
}

static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				 u8 *tef_tail)

@@ -147,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
	BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));

	len = (chip_tx_tail << shift) - (tail << shift);
	*len_p = len >> shift;
	len >>= shift;

	/* According to mcp2518fd erratum DS80000789E 6. the FIFOCI
	 * bits of a FIFOSTA register, here the TX-FIFO tail index
	 * might be corrupted.
	 *
	 * However here it seems the bit indicating that the TX-FIFO
	 * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct
	 * while the TX-FIFO tail index is.
	 *
	 * We assume the TX-FIFO is empty, i.e. all pending CAN frames
	 * haven been send, if:
	 * - Chip's head and tail index are equal (len == 0).
	 * - The TX-FIFO is less than half full.
	 *   (The TX-FIFO empty case has already been checked at the
	 *    beginning of this function.)
	 * - No free buffers in the TX ring.
	 */
	if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
	    mcp251xfd_get_tx_free(tx_ring) == 0)
		len = tx_ring->obj_num;

	*len_p = len;

	return 0;
}
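The (chip_tx_tail << shift) - (tail << shift) arithmetic above deserves a note: shifting both ring indices to the top of the unsigned type before subtracting makes the wrap-around of a power-of-two ring fall out of ordinary unsigned overflow. A hedged userspace demonstration follows; the function name and the __builtin_ctz-based shift derivation are illustrative, not the driver's code:

#include <stdio.h>

/* Distance from tail to head in a ring with a power-of-two number
 * of entries, using the same shift trick as the driver code above:
 * the top bits of the u8 hold the index, so the subtraction wraps
 * modulo the ring size once shifted back down.
 */
static unsigned char ring_distance(unsigned char head, unsigned char tail,
				   unsigned int obj_num)
{
	unsigned int shift = 8 - __builtin_ctz(obj_num);
	unsigned char len = (head << shift) - (tail << shift);

	return len >> shift;
}

int main(void)
{
	/* 16-entry ring, head wrapped past tail: (2 - 14) mod 16 == 4 */
	printf("%u\n", ring_distance(2, 14, 16));
	return 0;
}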
@@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
		/* bus error interrupt */
		netdev_dbg(dev, "bus error interrupt\n");
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		ecc = readl(priv->base + SUN4I_REG_STA_ADDR);

		if (likely(skb)) {
			ecc = readl(priv->base + SUN4I_REG_STA_ADDR);

			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

			switch (ecc & SUN4I_STA_MASK_ERR) {

@@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
					       >> 16;
				break;
			}
			/* error occurred during transmission? */
			if ((ecc & SUN4I_STA_ERR_DIR) == 0)
		}

		/* error occurred during transmission? */
		if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
			if (likely(skb))
				cf->data[2] |= CAN_ERR_PROT_TX;
			stats->tx_errors++;
		} else {
			stats->rx_errors++;
		}
	}
	if (isrc & SUN4I_INT_ERR_PASSIVE) {

@@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
		tx_state = txerr >= rxerr ? state : 0;
		rx_state = txerr <= rxerr ? state : 0;

		if (likely(skb))
			can_change_state(dev, cf, tx_state, rx_state);
		else
			priv->can.state = state;
		/* The skb allocation might fail, but can_change_state()
		 * handles cf == NULL.
		 */
		can_change_state(dev, cf, tx_state, rx_state);
		if (state == CAN_STATE_BUS_OFF)
			can_bus_off(dev);
	}
@@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
	struct net_device_stats *stats = &dev->netdev->stats;

	skb = alloc_can_err_skb(dev->netdev, &cf);
	if (skb == NULL)
		return;

	if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
		u8 state = msg->msg.can_state;

		if (state & SJA1000_SR_BS) {
			dev->can.state = CAN_STATE_BUS_OFF;
			cf->can_id |= CAN_ERR_BUSOFF;
			if (skb)
				cf->can_id |= CAN_ERR_BUSOFF;

			dev->can.can_stats.bus_off++;
			can_bus_off(dev->netdev);

@@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)

		/* bus error interrupt */
		dev->can.can_stats.bus_error++;
		stats->rx_errors++;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		if (skb) {
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		switch (ecc & SJA1000_ECC_MASK) {
		case SJA1000_ECC_BIT:
			cf->data[2] |= CAN_ERR_PROT_BIT;
			break;
		case SJA1000_ECC_FORM:
			cf->data[2] |= CAN_ERR_PROT_FORM;
			break;
		case SJA1000_ECC_STUFF:
			cf->data[2] |= CAN_ERR_PROT_STUFF;
			break;
		default:
			cf->data[3] = ecc & SJA1000_ECC_SEG;
			break;
			switch (ecc & SJA1000_ECC_MASK) {
			case SJA1000_ECC_BIT:
				cf->data[2] |= CAN_ERR_PROT_BIT;
				break;
			case SJA1000_ECC_FORM:
				cf->data[2] |= CAN_ERR_PROT_FORM;
				break;
			case SJA1000_ECC_STUFF:
				cf->data[2] |= CAN_ERR_PROT_STUFF;
				break;
			default:
				cf->data[3] = ecc & SJA1000_ECC_SEG;
				break;
			}
		}

		/* Error occurred during transmission? */
		if ((ecc & SJA1000_ECC_DIR) == 0)
			cf->data[2] |= CAN_ERR_PROT_TX;
		if ((ecc & SJA1000_ECC_DIR) == 0) {
			stats->tx_errors++;
			if (skb)
				cf->data[2] |= CAN_ERR_PROT_TX;
		} else {
			stats->rx_errors++;
		}

		if (dev->can.state == CAN_STATE_ERROR_WARNING ||
		    dev->can.state == CAN_STATE_ERROR_PASSIVE) {
		if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
			    dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (txerr > rxerr) ?
			       CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
		}
	} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		}

		stats->rx_over_errors++;
		stats->rx_errors++;
	}

	netif_rx(skb);
	if (skb)
		netif_rx(skb);
}

/*
@@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
	netdev_dbg(netdev, "bus error interrupt\n");

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	if (skb) {
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

@@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,

		/* set error location */
		cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
	}

	/* Error occurred during transmission? */
	if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0)
	/* Error occurred during transmission? */
	if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
		stats->tx_errors++;
		if (skb)
			cf->data[2] |= CAN_ERR_PROT_TX;
	} else {
		stats->rx_errors++;
	}

	set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
@@ -43,9 +43,6 @@
#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30

#define GS_USB_ENDPOINT_IN 1
#define GS_USB_ENDPOINT_OUT 2

/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
 * for timer overflow (will be after ~71 minutes)
 */

@@ -336,6 +333,9 @@ struct gs_usb {

	unsigned int hf_size_rx;
	u8 active_channels;

	unsigned int pipe_in;
	unsigned int pipe_out;
};

/* 'allocate' a tx context.

@@ -687,7 +687,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)

 resubmit_urb:
	usb_fill_bulk_urb(urb, parent->udev,
			  usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
			  parent->pipe_in,
			  hf, dev->parent->hf_size_rx,
			  gs_usb_receive_bulk_callback, parent);

@@ -819,7 +819,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
	}

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
			  dev->parent->pipe_out,
			  hf, dev->hf_size_tx,
			  gs_usb_xmit_callback, txc);

@@ -925,8 +925,7 @@ static int gs_can_open(struct net_device *netdev)
		/* fill, anchor, and submit rx urb */
		usb_fill_bulk_urb(urb,
				  dev->udev,
				  usb_rcvbulkpipe(dev->udev,
						  GS_USB_ENDPOINT_IN),
				  dev->parent->pipe_in,
				  buf,
				  dev->parent->hf_size_rx,
				  gs_usb_receive_bulk_callback, parent);

@@ -1413,6 +1412,7 @@ static int gs_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *ep_in, *ep_out;
	struct gs_host_frame *hf;
	struct gs_usb *parent;
	struct gs_host_config hconf = {

@@ -1422,6 +1422,13 @@ static int gs_usb_probe(struct usb_interface *intf,
	unsigned int icount, i;
	int rc;

	rc = usb_find_common_endpoints(intf->cur_altsetting,
				       &ep_in, &ep_out, NULL, NULL);
	if (rc) {
		dev_err(&intf->dev, "Required endpoints not found\n");
		return rc;
	}

	/* send host config */
	rc = usb_control_msg_send(udev, 0,
				  GS_USB_BREQ_HOST_FORMAT,

@@ -1466,6 +1473,10 @@ static int gs_usb_probe(struct usb_interface *intf,
	usb_set_intfdata(intf, parent);
	parent->udev = udev;

	/* store the detected endpoints */
	parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
	parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);

	for (i = 0; i < icount; i++) {
		unsigned int hf_size_rx = 0;
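The gs_usb hunks above replace the hardcoded GS_USB_ENDPOINT_IN/OUT numbers with endpoints discovered at probe time. Condensed into one place, the pattern looks roughly like the sketch below; this is a simplified illustration with trimmed error handling (example_probe is a hypothetical name), not the driver's exact code:

#include <linux/usb.h>

/* Sketch: discover the first bulk IN/OUT endpoint pair once at probe
 * time and derive the pipes that every later URB submission reuses.
 */
static int example_probe(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *ep_in, *ep_out;
	unsigned int pipe_in, pipe_out;
	int rc;

	rc = usb_find_common_endpoints(intf->cur_altsetting,
				       &ep_in, &ep_out, NULL, NULL);
	if (rc)
		return rc;	/* required bulk endpoints not found */

	pipe_in = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
	pipe_out = usb_sndbulkpipe(udev, ep_out->bEndpointAddress);

	/* A real driver stores pipe_in/pipe_out in its private state
	 * and passes them to usb_fill_bulk_urb() instead of calling
	 * usb_{rcv,snd}bulkpipe() with fixed endpoint numbers.
	 */
	(void)pipe_in;
	(void)pipe_out;
	return 0;
}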
@@ -172,13 +172,12 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

static int vxcan_newlink(struct net *net, struct net_device *dev,
static int vxcan_newlink(struct net *peer_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];

@@ -203,20 +202,15 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
	if (IS_ERR(peer))
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
@@ -3421,15 +3421,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
	}
}

static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct bnxt_tpa_idx_map *map;
	int i;

	if (!rxr->rx_tpa)
		goto skip_rx_tpa_free;

	for (i = 0; i < bp->max_tpa; i++) {
		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
		u8 *data = tpa_info->data;

@@ -3440,6 +3436,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
		tpa_info->data = NULL;
		page_pool_free_va(rxr->head_pool, data, false);
	}
}

static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
				       struct bnxt_rx_ring_info *rxr)
{
	struct bnxt_tpa_idx_map *map;

	if (!rxr->rx_tpa)
		goto skip_rx_tpa_free;

	bnxt_free_one_tpa_info_data(bp, rxr);

skip_rx_tpa_free:
	if (!rxr->rx_buf_ring)

@@ -3467,7 +3474,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++)
		bnxt_free_one_rx_ring_skbs(bp, i);
		bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}

static void bnxt_free_skbs(struct bnxt *bp)

@@ -3608,29 +3615,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
	return 0;
}

static void bnxt_free_one_tpa_info(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	int i;

	kfree(rxr->rx_tpa_idx_map);
	rxr->rx_tpa_idx_map = NULL;
	if (rxr->rx_tpa) {
		for (i = 0; i < bp->max_tpa; i++) {
			kfree(rxr->rx_tpa[i].agg_arr);
			rxr->rx_tpa[i].agg_arr = NULL;
		}
	}
	kfree(rxr->rx_tpa);
	rxr->rx_tpa = NULL;
}

static void bnxt_free_tpa_info(struct bnxt *bp)
{
	int i, j;
	int i;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

		kfree(rxr->rx_tpa_idx_map);
		rxr->rx_tpa_idx_map = NULL;
		if (rxr->rx_tpa) {
			for (j = 0; j < bp->max_tpa; j++) {
				kfree(rxr->rx_tpa[j].agg_arr);
				rxr->rx_tpa[j].agg_arr = NULL;
			}
		}
		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;
		bnxt_free_one_tpa_info(bp, rxr);
	}
}

static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	struct rx_agg_cmp *agg;
	int i;

	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
			      GFP_KERNEL);
	if (!rxr->rx_tpa)
		return -ENOMEM;

	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return 0;
	for (i = 0; i < bp->max_tpa; i++) {
		agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
		if (!agg)
			return -ENOMEM;
		rxr->rx_tpa[i].agg_arr = agg;
	}
	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
				      GFP_KERNEL);
	if (!rxr->rx_tpa_idx_map)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
	int i, j;
	int i, rc;

	bp->max_tpa = MAX_TPA;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3641,25 +3683,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct rx_agg_cmp *agg;

		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
				      GFP_KERNEL);
		if (!rxr->rx_tpa)
			return -ENOMEM;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			continue;
		for (j = 0; j < bp->max_tpa; j++) {
			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
			if (!agg)
				return -ENOMEM;
			rxr->rx_tpa[j].agg_arr = agg;
		}
		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
					      GFP_KERNEL);
		if (!rxr->rx_tpa_idx_map)
			return -ENOMEM;
		rc = bnxt_alloc_one_tpa_info(bp, rxr);
		if (rc)
			return rc;
	}
	return 0;
}

@@ -3683,7 +3710,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
		xdp_rxq_info_unreg(&rxr->xdp_rxq);

		page_pool_destroy(rxr->page_pool);
		if (rxr->page_pool != rxr->head_pool)
		if (bnxt_separate_head_pool())
			page_pool_destroy(rxr->head_pool);
		rxr->page_pool = rxr->head_pool = NULL;

@@ -3737,6 +3764,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
	return PTR_ERR(pool);
}

static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	u16 mem_size;

	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
	mem_size = rxr->rx_agg_bmap_size / 8;
	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
	if (!rxr->rx_agg_bmap)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int numa_node = dev_to_node(&bp->pdev->dev);

@@ -3781,19 +3821,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)

		ring->grp_idx = i;
		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;
			rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
			if (rc)
				return rc;
		}
	}
	if (bp->flags & BNXT_FLAG_TPA)
@@ -4268,10 +4304,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
	rxr->rx_agg_prod = prod;
}

static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr)
{
	dma_addr_t mapping;
	u8 *data;
	int i;

	for (i = 0; i < bp->max_tpa; i++) {
		data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
					    GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		rxr->rx_tpa[i].data = data;
		rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
		rxr->rx_tpa[i].mapping = mapping;
	}

	return 0;
}

static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	int i;
	int rc;

	bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);

@@ -4281,19 +4338,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
		bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
						    GFP_KERNEL);
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
		rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
		if (rc)
			return rc;
	}
	return 0;
}

@@ -13663,7 +13710,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
			bnxt_reset_task(bp, true);
			break;
		}
		bnxt_free_one_rx_ring_skbs(bp, i);
		bnxt_free_one_rx_ring_skbs(bp, rxr);
		rxr->rx_prod = 0;
		rxr->rx_agg_prod = 0;
		rxr->rx_sw_agg_prod = 0;

@@ -15293,19 +15340,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
	.get_base_stats = bnxt_get_base_stats,
};

static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	u16 mem_size;

	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
	mem_size = rxr->rx_agg_bmap_size / 8;
	rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
	if (!rxr->rx_agg_bmap)
		return -ENOMEM;

	return 0;
}

static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt_rx_ring_info *rxr, *clone;

@@ -15354,15 +15388,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
		goto err_free_rx_agg_ring;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_alloc_one_tpa_info(bp, clone);
		if (rc)
			goto err_free_tpa_info;
	}

	bnxt_init_one_rx_ring_rxbd(bp, clone);
	bnxt_init_one_rx_agg_ring_rxbd(bp, clone);

	bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		bnxt_alloc_one_rx_ring_page(bp, clone, idx);
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_alloc_one_tpa_info_data(bp, clone);

	return 0;

err_free_tpa_info:
	bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
	bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:

@@ -15370,9 +15414,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
err_rxq_info_unreg:
	xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
	clone->page_pool->p.napi = NULL;
	page_pool_destroy(clone->page_pool);
	if (bnxt_separate_head_pool())
		page_pool_destroy(clone->head_pool);
	clone->page_pool = NULL;
	clone->head_pool = NULL;
	return rc;
}

@@ -15382,13 +15428,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ring_struct *ring;

	bnxt_free_one_rx_ring(bp, rxr);
	bnxt_free_one_rx_agg_ring(bp, rxr);
	bnxt_free_one_rx_ring_skbs(bp, rxr);

	xdp_rxq_info_unreg(&rxr->xdp_rxq);

	page_pool_destroy(rxr->page_pool);
	if (bnxt_separate_head_pool())
		page_pool_destroy(rxr->head_pool);
	rxr->page_pool = NULL;
	rxr->head_pool = NULL;

	ring = &rxr->rx_ring_struct;
	bnxt_free_ring(bp, &ring->ring_mem);

@@ -15470,7 +15518,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
	rxr->rx_agg_prod = clone->rx_agg_prod;
	rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
	rxr->rx_next_cons = clone->rx_next_cons;
	rxr->rx_tpa = clone->rx_tpa;
	rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
	rxr->page_pool = clone->page_pool;
	rxr->head_pool = clone->head_pool;
	rxr->xdp_rxq = clone->xdp_rxq;

	bnxt_copy_rx_ring(bp, rxr, clone);

@@ -15529,6 +15580,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
	rxr->rx_next_cons = 0;
	page_pool_disable_direct_recycling(rxr->page_pool);
	if (bnxt_separate_head_pool())
		page_pool_disable_direct_recycling(rxr->head_pool);

	memcpy(qmem, rxr, sizeof(*rxr));
	bnxt_init_rx_ring_struct(bp, qmem);
@@ -1187,10 +1187,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
		}
	}

	if (fltr->base.flags & BNXT_ACT_DROP)
	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	else
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
@@ -29,6 +29,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
					 u8 preemptible_tcs)
{
	if (!(priv->si->hw_features & ENETC_SI_F_QBU))
		return;

	priv->preemptible_tcs = preemptible_tcs;
	enetc_mm_commit_preemptible_tcs(priv);
}

@@ -1756,15 +1759,6 @@ void enetc_get_si_caps(struct enetc_si *si)
		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
	}

	if (val & ENETC_SIPCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_SIPCAPR0_QBU)
		si->hw_features |= ENETC_SI_F_QBU;

	if (val & ENETC_SIPCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);

@@ -23,10 +23,7 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_QBU BIT(3)
#define ENETC_SIPCAPR0_RFS BIT(2)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30

@@ -194,6 +191,9 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
#define ENETC_PCAPR0_PSFP BIT(9)
#define ENETC_PCAPR0_QBV BIT(4)
#define ENETC_PCAPR0_QBU BIT(3)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
@@ -409,6 +409,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
	enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}

static void enetc_port_get_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	val = enetc_port_rd(hw, ENETC_PCAPR0);

	if (val & ENETC_PCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_PCAPR0_QBU)
		si->hw_features |= ENETC_SI_F_QBU;

	if (val & ENETC_PCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}

static void enetc_port_si_configure(struct enetc_si *si)
{
	struct enetc_pf *pf = enetc_si_priv(si);

@@ -416,6 +433,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
	int num_rings, i;
	u32 val;

	enetc_port_get_caps(si);

	val = enetc_port_rd(hw, ENETC_PCAPR0);
	num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
@@ -542,7 +542,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_type: type of netlist node to look for
 * @ctx: context of the search
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *

@@ -552,10 +553,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 * Return:
 * * 0 if the node is found,
 * * -ENOENT if no handle was found,
 * * negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

@@ -566,8 +569,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
	int status;

	cmd.addr.topo_params.node_type_ctx =
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
			   node_type_ctx);
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
	cmd.addr.topo_params.index = idx;

	status = ice_aq_get_netlist_node(hw, &cmd,

@@ -2747,9 +2750,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
 */
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
	    ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
				  ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
		return false;

@@ -2765,6 +2770,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
				  NULL))
		return false;

@@ -2785,12 +2791,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
				   ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
				   ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
				   NULL)) {
		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
		return true;
	} else if (!ice_find_netlist_node(hw,
					  ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
					  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
					  ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
					  NULL)) {
		hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;

@@ -2809,6 +2817,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
	if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
				  ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
				  ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
		return false;
|
@ -6408,10 +6408,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
|
||||
int err = 0;
|
||||
|
||||
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
|
||||
* if either bit is set
|
||||
* if either bit is set. In switchdev mode Rx filtering should never be
|
||||
* enabled.
|
||||
*/
|
||||
if (features &
|
||||
(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
|
||||
if ((features &
|
||||
(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
|
||||
!ice_is_eswitch_mode_switchdev(vsi->back))
|
||||
err = vlan_ops->ena_rx_filtering(vsi);
|
||||
else
|
||||
err = vlan_ops->dis_rx_filtering(vsi);
|
||||
|
@ -1518,7 +1518,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
|
||||
* lower 8 bits in the low register, and the upper 32 bits in the high
|
||||
* register.
|
||||
*/
|
||||
*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
|
||||
*tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
|
||||
FIELD_PREP(TS_PHY_LOW_M, lo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -682,9 +682,8 @@ static inline bool ice_is_dual(struct ice_hw *hw)
|
||||
#define TS_HIGH_M 0xFF
|
||||
#define TS_HIGH_S 32
|
||||
|
||||
#define TS_PHY_LOW_M 0xFF
|
||||
#define TS_PHY_HIGH_M 0xFFFFFFFF
|
||||
#define TS_PHY_HIGH_S 8
|
||||
#define TS_PHY_LOW_M GENMASK(7, 0)
|
||||
#define TS_PHY_HIGH_M GENMASK_ULL(39, 8)
|
||||
|
||||
#define BYTES_PER_IDX_ADDR_L_U 8
|
||||
#define BYTES_PER_IDX_ADDR_L 4
|
||||
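The ice_ptp change above swaps open-coded shift/mask arithmetic for GENMASK()/FIELD_PREP(). As a small hedged illustration (the DEMO_ macros below are stand-ins, not the driver's headers), assembling the 40-bit PHY timestamp from the two registers looks like:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TS_PHY_LOW_M	GENMASK(7, 0)		/* bits 7:0  */
#define DEMO_TS_PHY_HIGH_M	GENMASK_ULL(39, 8)	/* bits 39:8 */

/* FIELD_PREP() shifts each value into the position its mask names,
 * so the bit layout lives in one place instead of in scattered
 * shift constants.
 */
static u64 demo_assemble_tstamp(u32 hi, u8 lo)
{
	return FIELD_PREP(DEMO_TS_PHY_HIGH_M, hi) |
	       FIELD_PREP(DEMO_TS_PHY_LOW_M, lo);
}

Keeping the masks as GENMASK() expressions also lets static checkers verify that the fields do not overlap, which the old separate shift/width defines could not express.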
|
@ -4128,6 +4128,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
|
||||
.get_qos_caps = ice_vc_get_qos_caps,
|
||||
.cfg_q_bw = ice_vc_cfg_q_bw,
|
||||
.cfg_q_quanta = ice_vc_cfg_q_quanta,
|
||||
/* If you add a new op here please make sure to add it to
|
||||
* ice_virtchnl_repr_ops as well.
|
||||
*/
|
||||
};
|
||||
|
||||
/**
|
||||
@ -4258,6 +4261,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
|
||||
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
|
||||
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
|
||||
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
|
||||
.get_qos_caps = ice_vc_get_qos_caps,
|
||||
.cfg_q_bw = ice_vc_cfg_q_bw,
|
||||
.cfg_q_quanta = ice_vc_cfg_q_quanta,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -2448,6 +2448,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
|
||||
* rest of the packet.
|
||||
*/
|
||||
tx_buf->type = LIBETH_SQE_EMPTY;
|
||||
idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
|
||||
|
||||
/* Adjust the DMA offset and the remaining size of the
|
||||
* fragment. On the first iteration of this loop,
|
||||
|
@ -637,6 +637,10 @@ static int __init igb_init_module(void)
|
||||
dca_register_notify(&dca_notifier);
|
||||
#endif
|
||||
ret = pci_register_driver(&igb_driver);
|
||||
#ifdef CONFIG_IGB_DCA
|
||||
if (ret)
|
||||
dca_unregister_notify(&dca_notifier);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -194,6 +194,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
|
||||
dev_err(&adapter->pdev->dev, format, ## arg)
|
||||
#define e_dev_notice(format, arg...) \
|
||||
dev_notice(&adapter->pdev->dev, format, ## arg)
|
||||
#define e_dbg(msglvl, format, arg...) \
|
||||
netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
#define e_info(msglvl, format, arg...) \
|
||||
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
#define e_err(msglvl, format, arg...) \
|
||||
|
@ -40,7 +40,7 @@
|
||||
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
|
||||
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
|
||||
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
|
||||
#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
|
||||
#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
|
||||
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
|
||||
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
|
||||
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
|
||||
|
@ -1048,7 +1048,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
|
||||
break;
|
||||
}
|
||||
|
||||
e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
|
||||
e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
@ -629,7 +629,6 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
|
||||
|
||||
switch (adapter->hw.api_version) {
|
||||
case ixgbe_mbox_api_14:
|
||||
case ixgbe_mbox_api_15:
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
|
@ -159,6 +159,7 @@ enum nix_scheduler {
|
||||
#define SDP_HW_MIN_FRS 16
|
||||
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
|
||||
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
|
||||
#define SDP_LINK_CREDIT 0x320202
|
||||
|
||||
/* NIX RX action operation*/
|
||||
#define NIX_RX_ACTIONOP_DROP (0x0ull)
|
||||
|
@ -4672,6 +4672,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
|
||||
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
|
||||
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
|
||||
|
||||
/* Set SDP link credit */
|
||||
rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
|
||||
|
||||
/* Set default min/max packet lengths allowed on NIX Rx links.
|
||||
*
|
||||
* With HW reset minlen value of 60byte, HW will treat ARP pkts
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <net/nexthop.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
#include "tc_tun_encap.h"
|
||||
#include "fs_core.h"
|
||||
#include "en_tc.h"
|
||||
#include "tc_tun.h"
|
||||
#include "rep/tc.h"
|
||||
@ -24,10 +25,18 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
|
||||
|
||||
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
|
||||
|
||||
if (!route_dev || !netif_is_ovs_master(route_dev) ||
|
||||
attr->parse_attr->filter_dev == e->out_dev)
|
||||
if (!route_dev || !netif_is_ovs_master(route_dev))
|
||||
goto out;
|
||||
|
||||
if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS &&
|
||||
mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) &&
|
||||
(attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) {
|
||||
mlx5_core_warn(priv->mdev,
|
||||
"Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
|
||||
MLX5E_TC_INT_PORT_EGRESS,
|
||||
&attr->action, out_index);
|
||||
|
@@ -2680,11 +2680,11 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
 
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
-			      struct mlx5e_channel_param *cparam,
			      struct xsk_buff_pool *xsk_pool,
			      struct mlx5e_channel **cp)
 {
 	struct net_device *netdev = priv->netdev;
+	struct mlx5e_channel_param *cparam;
 	struct mlx5_core_dev *mdev;
 	struct mlx5e_xsk_param xsk;
 	struct mlx5e_channel *c;
@@ -2706,8 +2706,15 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 		return err;
 
 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
-	if (!c)
-		return -ENOMEM;
+	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+	if (!c || !cparam) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	err = mlx5e_build_channel_param(mdev, params, cparam);
+	if (err)
+		goto err_free;
 
 	c->priv     = priv;
 	c->mdev     = mdev;
@@ -2741,6 +2748,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 	*cp = c;
 
+	kvfree(cparam);
 	return 0;
 
 err_close_queues:
@@ -2749,6 +2757,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 err_napi_del:
 	netif_napi_del(&c->napi);
 
+err_free:
+	kvfree(cparam);
 	kvfree(c);
 
 	return err;
@@ -2807,20 +2817,14 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
 {
-	struct mlx5e_channel_param *cparam;
 	int err = -ENOMEM;
 	int i;
 
 	chs->num = chs->params.num_channels;
 
 	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
-	cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
-	if (!chs->c || !cparam)
-		goto err_free;
-
-	err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
-	if (err)
-		goto err_free;
+	if (!chs->c)
+		goto err_out;
 
 	for (i = 0; i < chs->num; i++) {
 		struct xsk_buff_pool *xsk_pool = NULL;
@@ -2828,7 +2832,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 		if (chs->params.xdp_prog)
 			xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
 
-		err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
+		err = mlx5e_open_channel(priv, i, &chs->params, xsk_pool, &chs->c[i]);
 		if (err)
 			goto err_close_channels;
 	}
@@ -2846,7 +2850,6 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 	}
 
 	mlx5e_health_channels_update(priv);
-	kvfree(cparam);
 	return 0;
 
 err_close_ptp:
@@ -2857,9 +2860,8 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 	for (i--; i >= 0; i--)
 		mlx5e_close_channel(chs->c[i]);
 
-err_free:
 	kfree(chs->c);
-	kvfree(cparam);
+err_out:
 	chs->num = 0;
 	return err;
 }
@@ -2335,9 +2335,10 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
 static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
 {
 	mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
 
-	if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) {
+	if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV ||
+	    mlx5_core_mp_enabled(esw->dev)) {
 		esw->mode = mode;
 		mlx5_rescan_drivers_locked(esw->dev);
 		mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
 		return;
 	}
@@ -39,6 +39,8 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
 		} else {
 			mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
 		}
+	} else {
+		kfree(mt->fc);
 	}
 
 	mlx5hws_match_template_destroy(mt);
@@ -990,6 +990,7 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
 	for (i = 0; i < bwc_queues; i++) {
 		mutex_init(&ctx->bwc_send_queue_locks[i]);
+		lockdep_register_key(ctx->bwc_lock_class_keys + i);
 		lockdep_set_class(ctx->bwc_send_queue_locks + i, ctx->bwc_lock_class_keys + i);
 	}
 
 	return 0;
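The bwc hunk above attaches one dynamically allocated struct lock_class_key to each send-queue lock. For readers unfamiliar with the API: keys that do not live in static storage must be registered with lockdep before lockdep_set_class() may point a lock at them, and unregistered before the backing memory is freed. A minimal kernel-context sketch of that pattern (structure and function names here are invented for illustration, not taken from the patch):

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct queue_lock_set {
	struct mutex *locks;
	struct lock_class_key *keys;
	unsigned int n;
};

static int queue_lock_set_init(struct queue_lock_set *s, unsigned int n)
{
	unsigned int i;

	s->locks = kcalloc(n, sizeof(*s->locks), GFP_KERNEL);
	s->keys = kcalloc(n, sizeof(*s->keys), GFP_KERNEL);
	if (!s->locks || !s->keys)
		return -ENOMEM; /* sketch: caller frees both arrays */

	for (i = 0; i < n; i++) {
		mutex_init(&s->locks[i]);
		/* dynamic keys must be registered before use */
		lockdep_register_key(&s->keys[i]);
		/* give each lock its own class so lockdep tracks them apart */
		lockdep_set_class(&s->locks[i], &s->keys[i]);
	}
	s->n = n;
	return 0;
}

static void queue_lock_set_destroy(struct queue_lock_set *s)
{
	unsigned int i;

	for (i = 0; i < s->n; i++) {
		mutex_destroy(&s->locks[i]);
		lockdep_unregister_key(&s->keys[i]);
	}
	kfree(s->locks);
	kfree(s->keys);
}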
@@ -324,6 +324,10 @@ static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] =
 	MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
 };
 
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1b[] = {
+	MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
+};
+
 static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
 	MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
 };
@@ -341,7 +345,7 @@ static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
 	MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
 	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
 	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
-	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+	MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x3F, mlxsw_sp_afk_element_info_ipv4_1b),
 	MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
 	MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
 	MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
@@ -2536,6 +2536,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
			     sizeof(req), sizeof(resp));
+	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
 	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
			STATISTICS_FLAGS_HC_RX_BYTES |
@@ -3301,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
-	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+	if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED))
+		rc = -EOPNOTSUPP;
+	else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
 		rc = -EINVAL;
 
 	return rc;
@@ -895,7 +895,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	if (geneve->cfg.df == GENEVE_DF_SET) {
 		df = htons(IP_DF);
 	} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
-		struct ethhdr *eth = eth_hdr(skb);
+		struct ethhdr *eth = skb_eth_hdr(skb);
 
 		if (ntohs(eth->h_proto) == ETH_P_IPV6) {
 			df = htons(IP_DF);
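The geneve hunk swaps eth_hdr() for skb_eth_hdr() because the transmit path cannot assume skb->mac_header has been set. The two accessors differ roughly as follows; this is a sketch paraphrased from the upstream inline helpers, shown with renamed functions so it does not shadow the real ones:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Like eth_hdr(): trusts the cached mac_header offset, which is only
 * valid when some earlier code called eth_type_trans()/skb_reset_mac_header().
 */
static inline struct ethhdr *eth_hdr_sketch(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb_mac_header(skb);
}

/* Like skb_eth_hdr(): reads the Ethernet header at the current data start,
 * which is what an xmit path sees before any mac_header bookkeeping.
 */
static inline struct ethhdr *skb_eth_hdr_sketch(const struct sk_buff *skb)
{
	return (struct ethhdr *)skb->data;
}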
@@ -327,7 +327,7 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
 
 static struct rtnl_link_ops netkit_link_ops;
 
-static int netkit_new_link(struct net *src_net, struct net_device *dev,
+static int netkit_new_link(struct net *peer_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
 {
@@ -342,7 +342,6 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
 	struct net_device *peer;
 	char ifname[IFNAMSIZ];
 	struct netkit *nk;
-	struct net *net;
 	int err;
 
 	if (data) {
@@ -385,13 +384,10 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
	    (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
		return -EOPNOTSUPP;
 
-	net = rtnl_link_get_net(src_net, tbp);
-	peer = rtnl_create_link(net, ifname, ifname_assign_type,
+	peer = rtnl_create_link(peer_net, ifname, ifname_assign_type,
				&netkit_link_ops, tbp, extack);
-	if (IS_ERR(peer)) {
-		put_net(net);
+	if (IS_ERR(peer))
		return PTR_ERR(peer);
-	}
 
 	netif_inherit_tso_max(peer, dev);
 
@@ -408,7 +404,6 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
 	bpf_mprog_bundle_init(&nk->bundle);
 
 	err = register_netdevice(peer);
-	put_net(net);
 	if (err < 0)
 		goto err_register_peer;
 	netif_carrier_off(peer);
@@ -351,6 +351,22 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
 static void lan88xx_link_change_notify(struct phy_device *phydev)
 {
 	int temp;
+	int ret;
+
+	/* Reset PHY to ensure MII_LPA provides up-to-date information. This
+	 * issue is reproducible only after parallel detection, as described
+	 * in IEEE 802.3-2022, Section 28.2.3.1 ("Parallel detection function"),
+	 * where the link partner does not support auto-negotiation.
+	 */
+	if (phydev->state == PHY_NOLINK) {
+		ret = phy_init_hw(phydev);
+		if (ret < 0)
+			goto link_change_notify_failed;
+
+		ret = _phy_start_aneg(phydev);
+		if (ret < 0)
+			goto link_change_notify_failed;
+	}
 
 	/* At forced 100 F/H mode, chip may fail to set mode correctly
 	 * when cable is switched between long(~50+m) and short one.
@@ -377,6 +393,11 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
 		phy_write(phydev, LAN88XX_INT_MASK, temp);
 	}
+
+	return;
+
+link_change_notify_failed:
+	phydev_err(phydev, "Link change process failed %pe\n", ERR_PTR(ret));
 }
 
 /**
@@ -1765,7 +1765,7 @@ static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
 	return 0;
 }
 
-static int veth_newlink(struct net *src_net, struct net_device *dev,
+static int veth_newlink(struct net *peer_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
 {
@@ -1776,7 +1776,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
 	unsigned char name_assign_type;
 	struct ifinfomsg *ifmp;
-	struct net *net;
 
 	/*
	 * create and register peer first
@@ -1800,13 +1799,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
		name_assign_type = NET_NAME_ENUM;
 	}
 
-	net = rtnl_link_get_net(src_net, tbp);
-	peer = rtnl_create_link(net, ifname, name_assign_type,
+	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
-	if (IS_ERR(peer)) {
-		put_net(net);
+	if (IS_ERR(peer))
		return PTR_ERR(peer);
-	}
 
 	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);
@@ -1817,8 +1813,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	netif_inherit_tso_max(peer, dev);
 
 	err = register_netdevice(peer);
-	put_net(net);
-	net = NULL;
 	if (err < 0)
 		goto err_register_peer;
 
@@ -150,7 +150,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
 		if (ppb > ops->max_adj || ppb < -ops->max_adj)
 			return -ERANGE;
 		err = ops->adjfine(ops, tx->freq);
-		ptp->dialed_frequency = tx->freq;
+		if (!err)
+			ptp->dialed_frequency = tx->freq;
 	} else if (tx->modes & ADJ_OFFSET) {
 		if (ops->adjphase) {
 			s32 max_phase_adj = ops->getmaxphase(ops);
@@ -45,6 +45,8 @@ struct inet_timewait_sock {
 #define tw_node			__tw_common.skc_nulls_node
 #define tw_bind_node		__tw_common.skc_bind_node
 #define tw_refcnt		__tw_common.skc_refcnt
+#define tw_tx_queue_mapping	__tw_common.skc_tx_queue_mapping
+#define tw_rx_queue_mapping	__tw_common.skc_rx_queue_mapping
 #define tw_hash			__tw_common.skc_hash
 #define tw_prot			__tw_common.skc_prot
 #define tw_net			__tw_common.skc_net
@@ -325,6 +325,11 @@ static inline int check_net(const struct net *net)
 #define net_drop_ns NULL
 #endif
 
+/* Returns true if the netns initialization is completed successfully */
+static inline bool net_initialized(const struct net *net)
+{
+	return READ_ONCE(net->list.next);
+}
+
 static inline void __netns_tracker_alloc(struct net *net,
					 netns_tracker *tracker,
@@ -161,6 +161,7 @@ enum {
 };
 
 struct nft_inner_tun_ctx {
+	unsigned long	cookie;
 	u16	type;
 	u16	inner_tunoff;
 	u16	inner_lloff;
@@ -1505,7 +1505,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
 	session->state = J1939_SESSION_NEW;
 
 	skb_queue_head_init(&session->skb_queue);
-	skb_queue_tail(&session->skb_queue, skb);
+	skb_queue_tail(&session->skb_queue, skb_get(skb));
 
 	skcb = j1939_skb_to_cb(skb);
 	memcpy(&session->skcb, skcb, sizeof(session->skcb));
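The j1939 fix queues skb_get(skb) rather than the bare skb, so the session's queue owns a reference of its own instead of borrowing the caller's. A minimal sketch of the idiom (the helper name is invented; kernel context assumed):

#include <linux/skbuff.h>

static void queue_owned_ref(struct sk_buff_head *q, struct sk_buff *skb)
{
	/* skb_get() bumps skb->users and returns the same skb, so the queue
	 * and the caller each hold an independent reference; the queue's is
	 * dropped later, e.g. by skb_queue_purge(), without touching the
	 * caller's.
	 */
	skb_queue_tail(q, skb_get(skb));
}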
@@ -6557,18 +6557,22 @@ static void __napi_hash_add_with_id(struct napi_struct *napi,
 static void napi_hash_add_with_id(struct napi_struct *napi,
				  unsigned int napi_id)
 {
-	spin_lock(&napi_hash_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&napi_hash_lock, flags);
 	WARN_ON_ONCE(napi_by_id(napi_id));
 	__napi_hash_add_with_id(napi, napi_id);
-	spin_unlock(&napi_hash_lock);
+	spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 static void napi_hash_add(struct napi_struct *napi)
 {
+	unsigned long flags;
+
 	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
 		return;
 
-	spin_lock(&napi_hash_lock);
+	spin_lock_irqsave(&napi_hash_lock, flags);
 
 	/* 0..NR_CPUS range is reserved for sender_cpu use */
 	do {
@@ -6578,7 +6582,7 @@ static void napi_hash_add(struct napi_struct *napi)
 
 	__napi_hash_add_with_id(napi, napi_gen_id);
 
-	spin_unlock(&napi_hash_lock);
+	spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 /* Warning : caller is responsible to make sure rcu grace period
@@ -6586,11 +6590,13 @@ static void napi_hash_add(struct napi_struct *napi)
 */
 static void napi_hash_del(struct napi_struct *napi)
 {
-	spin_lock(&napi_hash_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&napi_hash_lock, flags);
 
 	hlist_del_init_rcu(&napi->napi_hash_node);
 
-	spin_unlock(&napi_hash_lock);
+	spin_unlock_irqrestore(&napi_hash_lock, flags);
 }
 
 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
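The three napi_hash hunks convert plain spin_lock()/spin_unlock() to the irqsave variants so the hash lock can also be taken from contexts that run with interrupts disabled without risking deadlock. The general shape of the conversion, as a kernel-context sketch (the lock and function names here are placeholders):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_hash_lock);

static void example_hash_update(void)
{
	unsigned long flags;

	/* Save the current IRQ state and disable local interrupts for the
	 * critical section; restore exactly what was saved afterwards, so
	 * the function is safe whether or not the caller already disabled
	 * interrupts.
	 */
	spin_lock_irqsave(&example_hash_lock, flags);
	/* ... mutate the shared hash table ... */
	spin_unlock_irqrestore(&example_hash_lock, flags);
}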
@@ -45,9 +45,14 @@ static unsigned int default_operstate(const struct net_device *dev)
 	int iflink = dev_get_iflink(dev);
 	struct net_device *peer;
 
-	if (iflink == dev->ifindex)
+	/* If called from netdev_run_todo()/linkwatch_sync_dev(),
+	 * dev_net(dev) can be already freed, and RTNL is not held.
+	 */
+	if (dev->reg_state == NETREG_UNREGISTERED ||
+	    iflink == dev->ifindex)
 		return IF_OPER_DOWN;
 
+	ASSERT_RTNL();
 	peer = __dev_get_by_index(dev_net(dev), iflink);
 	if (!peer)
 		return IF_OPER_DOWN;
@@ -3746,6 +3746,7 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
 static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
			       const struct rtnl_link_ops *ops,
			       struct net *tgt_net, struct net *link_net,
+			       struct net *peer_net,
			       const struct nlmsghdr *nlh,
			       struct nlattr **tb, struct nlattr **data,
			       struct netlink_ext_ack *extack)
@@ -3776,8 +3777,13 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
 
 	dev->ifindex = ifm->ifi_index;
 
+	if (link_net)
+		net = link_net;
+	if (peer_net)
+		net = peer_net;
+
 	if (ops->newlink)
-		err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+		err = ops->newlink(net, dev, tb, data, extack);
 	else
 		err = register_netdevice(dev);
 	if (err < 0) {
@@ -3812,40 +3818,33 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
 	goto out;
 }
 
-static int rtnl_add_peer_net(struct rtnl_nets *rtnl_nets,
-			     const struct rtnl_link_ops *ops,
-			     struct nlattr *data[],
-			     struct netlink_ext_ack *extack)
+static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
+				     struct nlattr *data[],
+				     struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[IFLA_MAX + 1];
-	struct net *net;
 	int err;
 
 	if (!data || !data[ops->peer_type])
-		return 0;
+		return NULL;
 
 	err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
 	if (err < 0)
-		return err;
+		return ERR_PTR(err);
 
 	if (ops->validate) {
 		err = ops->validate(tb, NULL, extack);
 		if (err < 0)
-			return err;
+			return ERR_PTR(err);
 	}
 
-	net = rtnl_link_get_net_ifla(tb);
-	if (IS_ERR(net))
-		return PTR_ERR(net);
-	if (net)
-		rtnl_nets_add(rtnl_nets, net);
-
-	return 0;
+	return rtnl_link_get_net_ifla(tb);
 }
 
 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  const struct rtnl_link_ops *ops,
			  struct net *tgt_net, struct net *link_net,
+			  struct net *peer_net,
			  struct rtnl_newlink_tbs *tbs,
			  struct nlattr **data,
			  struct netlink_ext_ack *extack)
@@ -3894,14 +3893,15 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return -EOPNOTSUPP;
 	}
 
-	return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, nlh, tb, data, extack);
+	return rtnl_newlink_create(skb, ifm, ops, tgt_net, link_net, peer_net, nlh,
+				   tb, data, extack);
 }
 
 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
 {
+	struct net *tgt_net, *link_net = NULL, *peer_net = NULL;
 	struct nlattr **tb, **linkinfo, **data = NULL;
-	struct net *tgt_net, *link_net = NULL;
 	struct rtnl_link_ops *ops = NULL;
 	struct rtnl_newlink_tbs *tbs;
 	struct rtnl_nets rtnl_nets;
@@ -3971,9 +3971,11 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 
 	if (ops->peer_type) {
-		ret = rtnl_add_peer_net(&rtnl_nets, ops, data, extack);
-		if (ret < 0)
+		peer_net = rtnl_get_peer_net(ops, data, extack);
+		if (IS_ERR(peer_net))
			goto put_ops;
+		if (peer_net)
+			rtnl_nets_add(&rtnl_nets, peer_net);
 	}
 	}
 
@@ -4004,7 +4006,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 
 	rtnl_nets_lock(&rtnl_nets);
-	ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, tbs, data, extack);
+	ret = __rtnl_newlink(skb, nlh, ops, tgt_net, link_net, peer_net, tbs, data, extack);
 	rtnl_nets_unlock(&rtnl_nets);
 
 put_net:
@@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt,
			goto not_valid_or_not_known;
 		}
 
-		return dccp_feat_push_confirm(fn, feat, local, &fval);
+		if (dccp_feat_push_confirm(fn, feat, local, &fval)) {
+			kfree(fval.sp.vec);
+			return DCCP_RESET_CODE_TOO_BUSY;
+		}
+
+		return 0;
 	} else if (entry->state == FEAT_UNSTABLE) {	/* 6.6.2 */
 		return 0;
 	}
@@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits,
 	return 0;
 }
 
+/**
+ * ethnl_bitmap32_equal() - Compare two bitmaps
+ * @map1:  first bitmap
+ * @map2:  second bitmap
+ * @nbits: bit size to compare
+ *
+ * Return: true if first @nbits are equal, false if not
+ */
+static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2,
+				 unsigned int nbits)
+{
+	if (memcmp(map1, map2, nbits / 32 * sizeof(u32)))
+		return false;
+	if (nbits % 32 == 0)
+		return true;
+	return !((map1[nbits / 32] ^ map2[nbits / 32]) &
+		 ethnl_lower_bits(nbits % 32));
+}
+
 static int
 ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
			      const struct nlattr *attr, struct nlattr **tb,
			      ethnl_string_array_t names,
			      struct netlink_ext_ack *extack, bool *mod)
 {
+	u32 *saved_bitmap = NULL;
 	struct nlattr *bit_attr;
 	bool no_mask;
 	int rem;
@@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
 	}
 
 	no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
-	if (no_mask)
-		ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
+	if (no_mask) {
+		unsigned int nwords = DIV_ROUND_UP(nbits, 32);
+		unsigned int nbytes = nwords * sizeof(u32);
+		bool dummy;
+
+		/* The bitmap size is only the size of the map part without
+		 * its mask part.
+		 */
+		saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
+		if (!saved_bitmap)
+			return -ENOMEM;
+		memcpy(saved_bitmap, bitmap, nbytes);
+		ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
+	}
 
 	nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
 		bool old_val, new_val;
@@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
 		if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
			NL_SET_ERR_MSG_ATTR(extack, bit_attr,
					    "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
+			kfree(saved_bitmap);
			return -EINVAL;
 		}
 		ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
				      names, extack);
-		if (ret < 0)
+		if (ret < 0) {
+			kfree(saved_bitmap);
			return ret;
+		}
 		old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
 		if (new_val != old_val) {
			if (new_val)
				bitmap[idx / 32] |= ((u32)1 << (idx % 32));
			else
				bitmap[idx / 32] &= ~((u32)1 << (idx % 32));
-			*mod = true;
+			if (!no_mask)
+				*mod = true;
 		}
 	}
 
+	if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits))
+		*mod = true;
+
+	kfree(saved_bitmap);
 	return 0;
 }
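ethnl_bitmap32_equal() compares the full 32-bit words with memcmp() and then masks the final partial word so bits beyond nbits are ignored. The same logic as a standalone, runnable userspace program (names and sample data are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mask with the lowest n bits set; n is 1..31 here because the n % 32 == 0
 * case returns early below. */
static uint32_t lower_bits(unsigned int n)
{
	return (1u << n) - 1;
}

static bool bitmap32_equal(const uint32_t *a, const uint32_t *b,
			   unsigned int nbits)
{
	/* full words first ... */
	if (memcmp(a, b, nbits / 32 * sizeof(uint32_t)))
		return false;
	if (nbits % 32 == 0)
		return true;
	/* ... then the tail word under a mask, ignoring padding bits */
	return !((a[nbits / 32] ^ b[nbits / 32]) & lower_bits(nbits % 32));
}

int main(void)
{
	uint32_t a[2] = { 0xffffffff, 0x0000000f };
	uint32_t b[2] = { 0xffffffff, 0xffffff0f };

	/* bits 32..35 match, bit 40 differs */
	printf("36 bits: %s\n", bitmap32_equal(a, b, 36) ? "equal" : "differ");
	printf("41 bits: %s\n", bitmap32_equal(a, b, 41) ? "equal" : "differ");
	return 0;
}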
@@ -993,7 +993,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
 		return rc;
 
 	/* Nonzero ring with RSS only makes sense if NIC adds them together */
-	if (info.flow_type & FLOW_RSS && !ops->cap_rss_rxnfc_adds &&
+	if (cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS &&
+	    !ops->cap_rss_rxnfc_adds &&
	    ethtool_get_flow_spec_ring(info.fs.ring_cookie))
		return -EINVAL;
 
@@ -246,20 +246,22 @@ static const struct header_ops hsr_header_ops = {
 	.parse	 = eth_header_parse,
 };
 
-static struct sk_buff *hsr_init_skb(struct hsr_port *master)
+static struct sk_buff *hsr_init_skb(struct hsr_port *master, int extra)
 {
 	struct hsr_priv *hsr = master->hsr;
 	struct sk_buff *skb;
 	int hlen, tlen;
+	int len;
 
 	hlen = LL_RESERVED_SPACE(master->dev);
 	tlen = master->dev->needed_tailroom;
+	len = sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload);
 	/* skb size is same for PRP/HSR frames, only difference
	 * being, for PRP it is a trailer and for HSR it is a
-	 * header
+	 * header.
+	 * RedBox might use @extra more bytes.
	 */
-	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
-			    sizeof(struct hsr_sup_payload) + hlen + tlen);
+	skb = dev_alloc_skb(len + extra + hlen + tlen);
 
 	if (!skb)
 		return skb;
@@ -295,6 +297,7 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
 	struct hsr_sup_tlv *hsr_stlv;
 	struct hsr_sup_tag *hsr_stag;
 	struct sk_buff *skb;
+	int extra = 0;
 
 	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
@@ -303,7 +306,11 @@ static void send_hsr_supervision_frame(struct hsr_port *port,
 		hsr->announce_count++;
 	}
 
-	skb = hsr_init_skb(port);
+	if (hsr->redbox)
+		extra = sizeof(struct hsr_sup_tlv) +
+			sizeof(struct hsr_sup_payload);
+
+	skb = hsr_init_skb(port, extra);
 	if (!skb) {
 		netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
 		return;
@@ -362,7 +369,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
 	struct hsr_sup_tag *hsr_stag;
 	struct sk_buff *skb;
 
-	skb = hsr_init_skb(master);
+	skb = hsr_init_skb(master, 0);
 	if (!skb) {
 		netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
 		return;
@@ -700,6 +700,8 @@ static int fill_frame_info(struct hsr_frame_info *frame,
 		frame->is_vlan = true;
 
 	if (frame->is_vlan) {
+		if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr))
+			return -EINVAL;
 		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
 	}
@@ -517,6 +517,9 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
 	if (!IS_ERR(dst)) {
 		if (rt != rt2)
 			return rt;
+		if (inet_addr_type_dev_table(net, route_lookup_dev,
+					     fl4->daddr) == RTN_LOCAL)
+			return rt;
 	} else if (PTR_ERR(dst) == -EPERM) {
 		rt = NULL;
 	} else {
@@ -122,7 +122,7 @@ static void ipmr_expire_process(struct timer_list *t);
 
 static bool ipmr_can_free_table(struct net *net)
 {
-	return !check_net(net) || !net->ipv4.mr_rules_ops;
+	return !check_net(net) || !net_initialized(net);
 }
 
 static struct mr_table *ipmr_mr_table_iter(struct net *net,
@@ -326,6 +326,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	tcptw->tw_last_oow_ack_time = 0;
 	tcptw->tw_tx_delay	= tp->tcp_tx_delay;
 	tw->tw_txhash		= sk->sk_txhash;
+	tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+	tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
+#endif
 #if IS_ENABLED(CONFIG_IPV6)
 	if (tw->tw_family == PF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1674,7 +1674,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	int rmem, err = -ENOMEM;
 	spinlock_t *busy = NULL;
-	bool becomes_readable;
 	int size, rcvbuf;
 
 	/* Immediately drop when the receive queue is full.
@@ -1715,19 +1714,12 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
	 */
 	sock_skb_set_dropcount(sk, skb);
 
-	becomes_readable = skb_queue_empty(list);
 	__skb_queue_tail(list, skb);
 	spin_unlock(&list->lock);
 
-	if (!sock_flag(sk, SOCK_DEAD)) {
-		if (becomes_readable ||
-		    sk->sk_data_ready != sock_def_readable ||
-		    READ_ONCE(sk->sk_peek_off) >= 0)
-			INDIRECT_CALL_1(sk->sk_data_ready,
-					sock_def_readable, sk);
-		else
-			sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
-	}
+	if (!sock_flag(sk, SOCK_DEAD))
+		INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);
 
 	busylock_release(busy);
 	return 0;
@@ -4821,7 +4821,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			      ifm->ifa_prefixlen, extack);
 }
 
-static int modify_prefix_route(struct inet6_ifaddr *ifp,
+static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
			       unsigned long expires, u32 flags,
			       bool modify_peer)
 {
@@ -4845,7 +4845,9 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
					 ifp->prefix_len,
					 ifp->rt_priority, ifp->idev->dev,
					 expires, flags, GFP_KERNEL);
-	} else {
+		return 0;
+	}
+
+	if (f6i != net->ipv6.fib6_null_entry) {
 		table = f6i->fib6_table;
 		spin_lock_bh(&table->tb6_lock);
@@ -4858,9 +4860,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
 		}
 
 		spin_unlock_bh(&table->tb6_lock);
-
-		fib6_info_release(f6i);
 	}
+	fib6_info_release(f6i);
 
 	return 0;
 }
@@ -4939,7 +4940,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
 	int rc = -ENOENT;
 
 	if (had_prefixroute)
-		rc = modify_prefix_route(ifp, expires, flags, false);
+		rc = modify_prefix_route(net, ifp, expires, flags, false);
 
 	/* prefix route could have been deleted; if so restore it */
 	if (rc == -ENOENT) {
@@ -4949,7 +4950,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
 	}
 
 	if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
-		rc = modify_prefix_route(ifp, expires, flags, true);
+		rc = modify_prefix_route(net, ifp, expires, flags, true);
 
 	if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
 		addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
@@ -110,7 +110,7 @@ static void ipmr_expire_process(struct timer_list *t);
 
 static bool ip6mr_can_free_table(struct net *net)
 {
-	return !check_net(net) || !net->ipv6.mr6_rules_ops;
+	return !check_net(net) || !net_initialized(net);
 }
 
 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
@@ -2780,10 +2780,10 @@ static void ip6_negative_advice(struct sock *sk,
 	if (rt->rt6i_flags & RTF_CACHE) {
 		rcu_read_lock();
 		if (rt6_check_expired(rt)) {
-			/* counteract the dst_release() in sk_dst_reset() */
-			dst_hold(dst);
+			/* rt/dst can not be destroyed yet,
+			 * because of rcu_read_lock()
+			 */
 			sk_dst_reset(sk);
-
 			rt6_remove_exception_rt(rt);
 		}
 		rcu_read_unlock();
@@ -104,14 +104,19 @@ find_set_type(const char *name, u8 family, u8 revision)
 static bool
 load_settype(const char *name)
 {
+	if (!try_module_get(THIS_MODULE))
+		return false;
+
 	nfnl_unlock(NFNL_SUBSYS_IPSET);
 	pr_debug("try to load ip_set_%s\n", name);
 	if (request_module("ip_set_%s", name) < 0) {
 		pr_warn("Can't find ip_set type %s\n", name);
 		nfnl_lock(NFNL_SUBSYS_IPSET);
+		module_put(THIS_MODULE);
 		return false;
 	}
 	nfnl_lock(NFNL_SUBSYS_IPSET);
+	module_put(THIS_MODULE);
 	return true;
 }
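The ipset fix pins the module itself before dropping the subsystem lock, so the module cannot be unloaded while request_module() sleeps. The shape of that pattern in isolation, as a hedged kernel-context sketch (the lock and loader call are placeholders for whatever a real caller drops and invokes):

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(subsys_lock);

static bool load_helper_module(const char *name)
{
	/* Take a self-reference first: once subsys_lock is released, nothing
	 * else prevents this module from being removed under us.
	 */
	if (!try_module_get(THIS_MODULE))
		return false;

	mutex_unlock(&subsys_lock);
	/* may sleep while user space loads the module */
	if (request_module("%s", name) < 0) {
		mutex_lock(&subsys_lock);
		module_put(THIS_MODULE);
		return false;
	}
	mutex_lock(&subsys_lock);
	module_put(THIS_MODULE);
	return true;
}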
@@ -340,7 +340,7 @@ void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
 
 int __init ip_vs_protocol_init(void)
 {
-	char protocols[64];
+	char protocols[64] = { 0 };
 #define REGISTER_PROTOCOL(p)			\
	do {					\
		register_ip_vs_protocol(p);	\
@@ -348,8 +348,6 @@ int __init ip_vs_protocol_init(void)
		strcat(protocols, (p)->name);	\
	} while (0)
 
-	protocols[0] = '\0';
-	protocols[2] = '\0';
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
 #endif
@@ -210,35 +210,66 @@ static int nft_inner_parse(const struct nft_inner *priv,
			   struct nft_pktinfo *pkt,
			   struct nft_inner_tun_ctx *tun_ctx)
 {
-	struct nft_inner_tun_ctx ctx = {};
 	u32 off = pkt->inneroff;
 
 	if (priv->flags & NFT_INNER_HDRSIZE &&
-	    nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
+	    nft_inner_parse_tunhdr(priv, pkt, tun_ctx, &off) < 0)
		return -1;
 
	if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
-		if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
+		if (nft_inner_parse_l2l3(priv, pkt, tun_ctx, off) < 0)
			return -1;
	} else if (priv->flags & NFT_INNER_TH) {
-		ctx.inner_thoff = off;
-		ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
+		tun_ctx->inner_thoff = off;
+		tun_ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
	}
 
-	*tun_ctx = ctx;
 	tun_ctx->type = priv->type;
+	tun_ctx->cookie = (unsigned long)pkt->skb;
 	pkt->flags |= NFT_PKTINFO_INNER_FULL;
 
 	return 0;
 }
 
-static bool nft_inner_parse_needed(const struct nft_inner *priv,
-				   const struct nft_pktinfo *pkt,
+static bool nft_inner_restore_tun_ctx(const struct nft_pktinfo *pkt,
				      struct nft_inner_tun_ctx *tun_ctx)
 {
+	struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+	local_bh_disable();
+	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+	if (this_cpu_tun_ctx->cookie != (unsigned long)pkt->skb) {
+		local_bh_enable();
+		return false;
+	}
+	*tun_ctx = *this_cpu_tun_ctx;
+	local_bh_enable();
+
+	return true;
+}
+
+static void nft_inner_save_tun_ctx(const struct nft_pktinfo *pkt,
+				   const struct nft_inner_tun_ctx *tun_ctx)
+{
+	struct nft_inner_tun_ctx *this_cpu_tun_ctx;
+
+	local_bh_disable();
+	this_cpu_tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+	if (this_cpu_tun_ctx->cookie != tun_ctx->cookie)
+		*this_cpu_tun_ctx = *tun_ctx;
+	local_bh_enable();
+}
+
+static bool nft_inner_parse_needed(const struct nft_inner *priv,
+				   const struct nft_pktinfo *pkt,
+				   struct nft_inner_tun_ctx *tun_ctx)
+{
 	if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
 		return true;
 
+	if (!nft_inner_restore_tun_ctx(pkt, tun_ctx))
+		return true;
+
 	if (priv->type != tun_ctx->type)
 		return true;
 
@@ -248,27 +279,29 @@ static bool nft_inner_parse_needed(const struct nft_inner *priv,
 static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			   const struct nft_pktinfo *pkt)
 {
-	struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
 	const struct nft_inner *priv = nft_expr_priv(expr);
+	struct nft_inner_tun_ctx tun_ctx = {};
 
 	if (nft_payload_inner_offset(pkt) < 0)
 		goto err;
 
-	if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
-	    nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
+	if (nft_inner_parse_needed(priv, pkt, &tun_ctx) &&
+	    nft_inner_parse(priv, (struct nft_pktinfo *)pkt, &tun_ctx) < 0)
		goto err;
 
	switch (priv->expr_type) {
	case NFT_INNER_EXPR_PAYLOAD:
-		nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+		nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
		break;
	case NFT_INNER_EXPR_META:
-		nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+		nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, &tun_ctx);
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
+	nft_inner_save_tun_ctx(pkt, &tun_ctx);
 
 	return;
 err:
 	regs->verdict.code = NFT_BREAK;
@@ -24,11 +24,13 @@
 struct nft_rhash {
 	struct rhashtable		ht;
 	struct delayed_work		gc_work;
+	u32				wq_gc_seq;
 };
 
 struct nft_rhash_elem {
 	struct nft_elem_priv		priv;
 	struct rhash_head		node;
+	u32				wq_gc_seq;
 	struct nft_set_ext		ext;
 };
 
@@ -338,6 +340,10 @@ static void nft_rhash_gc(struct work_struct *work)
 	if (!gc)
 		goto done;
 
+	/* Elements never collected use a zero gc worker sequence number. */
+	if (unlikely(++priv->wq_gc_seq == 0))
+		priv->wq_gc_seq++;
+
 	rhashtable_walk_enter(&priv->ht, &hti);
 	rhashtable_walk_start(&hti);
 
@@ -355,6 +361,14 @@ static void nft_rhash_gc(struct work_struct *work)
			goto try_later;
 		}
 
+		/* rhashtable walk is unstable, already seen in this gc run?
+		 * Then, skip this element. In case of (unlikely) sequence
+		 * wraparound and stale element wq_gc_seq, next gc run will
+		 * just find this expired element.
+		 */
+		if (he->wq_gc_seq == priv->wq_gc_seq)
+			continue;
+
 		if (nft_set_elem_is_dead(&he->ext))
			goto dead_elem;
 
@@ -371,6 +385,8 @@ static void nft_rhash_gc(struct work_struct *work)
 		if (!gc)
			goto try_later;
 
+		/* annotate gc sequence for this attempt. */
+		he->wq_gc_seq = priv->wq_gc_seq;
 		nft_trans_gc_elem_add(gc, he);
 	}
 
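These nft_set_hash hunks tag each element with the current GC worker sequence number so an unstable rhashtable walk cannot queue the same expired element twice, with 0 reserved to mean "never collected". A self-contained userspace simulation of that bookkeeping (data and walk order are invented), runnable as-is:

#include <stdint.h>
#include <stdio.h>

struct elem {
	uint32_t wq_gc_seq; /* 0 = never collected */
	int id;
};

int main(void)
{
	struct elem pool[5] = { {0, 0}, {0, 1}, {0, 2}, {0, 3}, {0, 4} };
	/* an unstable walk may visit the same element more than once */
	int walk[7] = { 0, 1, 2, 1, 3, 0, 4 };
	uint32_t gc_seq = 0;
	int i;

	/* bump the run counter, skipping 0 on wraparound since it is the
	 * "never collected" sentinel */
	if (++gc_seq == 0)
		gc_seq++;

	for (i = 0; i < 7; i++) {
		struct elem *e = &pool[walk[i]];

		if (e->wq_gc_seq == gc_seq)
			continue; /* already queued in this gc run */
		e->wq_gc_seq = gc_seq; /* annotate this attempt */
		printf("collecting elem %d\n", e->id);
	}
	return 0; /* prints each of 0..4 exactly once */
}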
@@ -68,7 +68,7 @@ static noinline int nft_socket_cgroup_subtree_level(void)
 
 	cgroup_put(cgrp);
 
-	if (WARN_ON_ONCE(level > 255))
+	if (level > 255)
 		return -ERANGE;
 
 	if (WARN_ON_ONCE(level < 0))
@@ -96,7 +96,9 @@ static int led_tg_check(const struct xt_tgchk_param *par)
 	struct xt_led_info_internal *ledinternal;
 	int err;
 
-	if (ledinfo->id[0] == '\0')
+	/* Bail out if empty string or not a string at all. */
+	if (ledinfo->id[0] == '\0' ||
+	    !memchr(ledinfo->id, '\0', sizeof(ledinfo->id)))
 		return -EINVAL;
 
 	mutex_lock(&xt_led_mutex);
@@ -1369,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
 	int err;
 
 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
-	memset(md, 0xff, sizeof(*md));
 	md->version = 1;
 
 	if (!depth)
@@ -1398,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
 		}
+		memset(&md->u.index, 0xff, sizeof(md->u.index));
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
-			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
 		}
 	} else if (md->version == 2) {
@@ -1409,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
 		}
+		md->u.md2.dir = 1;
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
 		}
+		set_hwid(&md->u.md2, 0xff);
 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
@@ -1525,7 +1525,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 	b->backlogs[idx] -= len;
 	b->tin_backlog -= len;
 	sch->qstats.backlog -= len;
-	qdisc_tree_reduce_backlog(sch, 1, len);
 
 	flow->dropped++;
 	b->tin_dropped++;
@@ -1536,6 +1535,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 	__qdisc_drop(skb, to_free);
 	sch->q.qlen--;
+	qdisc_tree_reduce_backlog(sch, 1, len);
 
 	cake_heapify(q, 0);
 
@@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
 	if (idx == q->tail)
 		choke_zap_tail_holes(q);
 
-	--sch->q.qlen;
 	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
 	qdisc_drop(skb, sch, to_free);
+	--sch->q.qlen;
 }
 
 struct choke_skb_cb {
@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *segs, *nskb;
 	netdev_features_t features = netif_skb_features(skb);
-	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+	unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
 	int ret, nb;
 
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
 	nb = 0;
 	skb_list_walk_safe(segs, segs, nskb) {
 		skb_mark_not_on_list(segs);
-		qdisc_skb_cb(segs)->pkt_len = segs->len;
-		len += segs->len;
+		seg_len = segs->len;
+		qdisc_skb_cb(segs)->pkt_len = seg_len;
 		ret = qdisc_enqueue(segs, q->qdisc, to_free);
 		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
 		} else {
			nb++;
+			len += seg_len;
 		}
 	}
 	sch->q.qlen += nb;
-	if (nb > 1)
+	sch->qstats.backlog += len;
+	if (nb > 0) {
 		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
-	consume_skb(skb);
-	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+		consume_skb(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
 }
 
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
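In tbf_segment(), qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len) tells parent qdiscs that one GSO skb of prev_len bytes became nb enqueued segments totalling len bytes; counting len only for successful enqueues keeps dropped segments out of the accounting. A tiny runnable illustration of the deltas (all numbers are made up):

#include <stdio.h>

int main(void)
{
	int nb = 3;                     /* segments actually enqueued */
	unsigned int prev_len = 3000;   /* pkt_len of the original GSO skb */
	unsigned int len = 3120;        /* per-segment headers add bytes */

	/* qdisc_tree_reduce_backlog() subtracts these, so negative values
	 * mean the parents' counters grow */
	printf("qlen delta:    %d\n", 1 - nb);               /* -2 packets */
	printf("backlog delta: %d\n", (int)(prev_len - len)); /* -120 bytes */
	return 0;
}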
@@ -383,6 +383,7 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol)
 	smc->limit_smc_hs = net->smc.limit_smc_hs;
 	smc->use_fallback = false; /* assume rdma capability first */
 	smc->fallback_rsn = 0;
+	smc_close_init(smc);
 }
 
 static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
@@ -1299,7 +1300,6 @@ static int smc_connect_rdma(struct smc_sock *smc,
			goto connect_abort;
 	}
 
-	smc_close_init(smc);
 	smc_rx_init(smc);
 
 	if (ini->first_contact_local) {
@@ -1435,7 +1435,6 @@ static int smc_connect_ism(struct smc_sock *smc,
			goto connect_abort;
 		}
 	}
-	smc_close_init(smc);
 	smc_rx_init(smc);
 	smc_tx_init(smc);
 
@@ -1901,6 +1900,7 @@ static void smc_listen_out(struct smc_sock *new_smc)
 	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
 		atomic_dec(&lsmc->queued_smc_hs);
 
+	release_sock(newsmcsk); /* lock in smc_listen_work() */
 	if (lsmc->sk.sk_state == SMC_LISTEN) {
 		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
 		smc_accept_enqueue(&lsmc->sk, newsmcsk);
@@ -2422,6 +2422,7 @@ static void smc_listen_work(struct work_struct *work)
 	u8 accept_version;
 	int rc = 0;
 
+	lock_sock(&new_smc->sk); /* release in smc_listen_out() */
 	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
 		return smc_listen_out_err(new_smc);
 
@@ -2479,7 +2480,6 @@ static void smc_listen_work(struct work_struct *work)
 		goto out_decl;
 
 	mutex_lock(&smc_server_lgr_pending);
-	smc_close_init(new_smc);
 	smc_rx_init(new_smc);
 	smc_tx_init(new_smc);
 
@@ -814,10 +814,10 @@ static void cleanup_bearer(struct work_struct *work)
 		kfree_rcu(rcast, rcu);
 	}
 
-	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
 	dst_cache_destroy(&ub->rcast.dst_cache);
 	udp_tunnel_sock_release(ub->ubsock);
 	synchronize_net();
+	atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
 	kfree(ub);
 }
 
@@ -5738,7 +5738,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb,
 	/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
	 * because we want to make sure we apply the necessary labeling
	 * before IPsec is applied so we can leverage AH protection */
-	sk = skb->sk;
+	sk = sk_to_full_sk(skb->sk);
 	if (sk) {
 		struct sk_security_struct *sksec;
 
@@ -3,7 +3,8 @@
 
 import datetime
 import random
-from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt
+import re
+from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt, ksft_true
 from lib.py import NetDrvEpEnv
 from lib.py import EthtoolFamily, NetdevFamily
 from lib.py import KsftSkipEx, KsftFailEx
@@ -96,6 +97,13 @@ def _send_traffic_check(cfg, port, name, params):
                      f"traffic on inactive queues ({name}): " + str(cnts))
 
 
+def _ntuple_rule_check(cfg, rule_id, ctx_id):
+    """Check that ntuple rule references RSS context ID"""
+    text = ethtool(f"-n {cfg.ifname} rule {rule_id}").stdout
+    pattern = f"RSS Context (ID: )?{ctx_id}"
+    ksft_true(re.search(pattern, text), "RSS context not referenced in ntuple rule")
+
+
 def test_rss_key_indir(cfg):
     """Test basics like updating the main RSS key and indirection table."""
 
@@ -459,6 +467,8 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
         ntuple = ethtool_create(cfg, "-N", flow)
         defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
 
+        _ntuple_rule_check(cfg, ntuple, ctx_id)
+
     for i in range(ctx_cnt):
         _send_traffic_check(cfg, ports[i], f"context {i}",
                             { 'target': (2+i*2, 3+i*2),
@@ -27,6 +27,7 @@
 
 #include "timeout.h"
 #include "control.h"
+#include "util.h"
 
 static int control_fd = -1;
 
@@ -50,7 +51,6 @@ void control_init(const char *control_host,
 
 	for (ai = result; ai; ai = ai->ai_next) {
 		int fd;
-		int val = 1;
 
 		fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
 		if (fd < 0)
@@ -65,11 +65,8 @@ void control_init(const char *control_host,
			break;
 		}
 
-		if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
-			       &val, sizeof(val)) < 0) {
-			perror("setsockopt");
-			exit(EXIT_FAILURE);
-		}
+		setsockopt_int_check(fd, SOL_SOCKET, SO_REUSEADDR, 1,
+				     "setsockopt SO_REUSEADDR");
 
 		if (bind(fd, ai->ai_addr, ai->ai_addrlen) < 0)
			goto next;
@@ -14,16 +14,6 @@
 
 #include "msg_zerocopy_common.h"
 
-void enable_so_zerocopy(int fd)
-{
-	int val = 1;
-
-	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &val, sizeof(val))) {
-		perror("setsockopt");
-		exit(EXIT_FAILURE);
-	}
-}
-
 void vsock_recv_completion(int fd, const bool *zerocopied)
 {
 	struct sock_extended_err *serr;
@@ -12,7 +12,6 @@
 #define VSOCK_RECVERR	1
 #endif
 
-void enable_so_zerocopy(int fd);
 void vsock_recv_completion(int fd, const bool *zerocopied);
 
 #endif /* MSG_ZEROCOPY_COMMON_H */
@@ -651,3 +651,145 @@ void free_test_iovec(const struct iovec *test_iovec,
 
 	free(iovec);
 }
+
+/* Set "unsigned long long" socket option and check that it's indeed set */
+void setsockopt_ull_check(int fd, int level, int optname,
+			  unsigned long long val, char const *errmsg)
+{
+	unsigned long long chkval;
+	socklen_t chklen;
+	int err;
+
+	err = setsockopt(fd, level, optname, &val, sizeof(val));
+	if (err) {
+		fprintf(stderr, "setsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	chkval = ~val; /* just make storage != val */
+	chklen = sizeof(chkval);
+
+	err = getsockopt(fd, level, optname, &chkval, &chklen);
+	if (err) {
+		fprintf(stderr, "getsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	if (chklen != sizeof(chkval)) {
+		fprintf(stderr, "size mismatch: set %zu got %d\n", sizeof(val),
+			chklen);
+		goto fail;
+	}
+
+	if (chkval != val) {
+		fprintf(stderr, "value mismatch: set %llu got %llu\n", val,
+			chkval);
+		goto fail;
+	}
+	return;
+fail:
+	fprintf(stderr, "%s val %llu\n", errmsg, val);
+	exit(EXIT_FAILURE);
+}
+
+/* Set "int" socket option and check that it's indeed set */
+void setsockopt_int_check(int fd, int level, int optname, int val,
+			  char const *errmsg)
+{
+	int chkval;
+	socklen_t chklen;
+	int err;
+
+	err = setsockopt(fd, level, optname, &val, sizeof(val));
+	if (err) {
+		fprintf(stderr, "setsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	chkval = ~val; /* just make storage != val */
+	chklen = sizeof(chkval);
+
+	err = getsockopt(fd, level, optname, &chkval, &chklen);
+	if (err) {
+		fprintf(stderr, "getsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	if (chklen != sizeof(chkval)) {
+		fprintf(stderr, "size mismatch: set %zu got %d\n", sizeof(val),
+			chklen);
+		goto fail;
+	}
+
+	if (chkval != val) {
+		fprintf(stderr, "value mismatch: set %d got %d\n", val, chkval);
+		goto fail;
+	}
+	return;
+fail:
+	fprintf(stderr, "%s val %d\n", errmsg, val);
+	exit(EXIT_FAILURE);
+}
+
+static void mem_invert(unsigned char *mem, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		mem[i] = ~mem[i];
+}
+
+/* Set "timeval" socket option and check that it's indeed set */
+void setsockopt_timeval_check(int fd, int level, int optname,
+			      struct timeval val, char const *errmsg)
+{
+	struct timeval chkval;
+	socklen_t chklen;
+	int err;
+
+	err = setsockopt(fd, level, optname, &val, sizeof(val));
+	if (err) {
+		fprintf(stderr, "setsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	/* just make storage != val */
+	chkval = val;
+	mem_invert((unsigned char *)&chkval, sizeof(chkval));
+	chklen = sizeof(chkval);
+
+	err = getsockopt(fd, level, optname, &chkval, &chklen);
+	if (err) {
+		fprintf(stderr, "getsockopt err: %s (%d)\n",
+			strerror(errno), errno);
+		goto fail;
+	}
+
+	if (chklen != sizeof(chkval)) {
+		fprintf(stderr, "size mismatch: set %zu got %d\n", sizeof(val),
+			chklen);
+		goto fail;
+	}
+
+	if (memcmp(&chkval, &val, sizeof(val)) != 0) {
+		fprintf(stderr, "value mismatch: set %ld:%ld got %ld:%ld\n",
+			val.tv_sec, val.tv_usec, chkval.tv_sec, chkval.tv_usec);
+		goto fail;
+	}
+	return;
+fail:
+	fprintf(stderr, "%s val %ld:%ld\n", errmsg, val.tv_sec, val.tv_usec);
+	exit(EXIT_FAILURE);
+}
+
+void enable_so_zerocopy_check(int fd)
+{
+	setsockopt_int_check(fd, SOL_SOCKET, SO_ZEROCOPY, 1,
+			     "setsockopt SO_ZEROCOPY");
+}
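The new helpers implement a set-then-verify discipline: write the option, read it back into deliberately clobbered storage, and require both length and value to round-trip. A compilable standalone miniature of the same idea (the function name and error text here are ours, not the selftest's, and SO_REUSEADDR is just a convenient option to demonstrate on):

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>

static void setsockopt_int_verified(int fd, int level, int optname, int val,
				    const char *what)
{
	socklen_t len = sizeof(int);
	int chk = ~val; /* make storage != val before reading back */

	if (setsockopt(fd, level, optname, &val, sizeof(val)) ||
	    getsockopt(fd, level, optname, &chk, &len) ||
	    len != sizeof(int) || chk != val) {
		fprintf(stderr, "%s: option did not round-trip\n", what);
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return EXIT_FAILURE;
	}
	setsockopt_int_verified(fd, SOL_SOCKET, SO_REUSEADDR, 1,
				"SO_REUSEADDR");
	puts("SO_REUSEADDR verified");
	return 0;
}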
@@ -68,4 +68,11 @@ unsigned long iovec_hash_djb2(const struct iovec *iov, size_t iovnum);
 struct iovec *alloc_test_iovec(const struct iovec *test_iovec, int iovnum);
 void free_test_iovec(const struct iovec *test_iovec,
		     struct iovec *iovec, int iovnum);
+void setsockopt_ull_check(int fd, int level, int optname,
+			  unsigned long long val, char const *errmsg);
+void setsockopt_int_check(int fd, int level, int optname, int val,
+			  char const *errmsg);
+void setsockopt_timeval_check(int fd, int level, int optname,
+			      struct timeval val, char const *errmsg);
+void enable_so_zerocopy_check(int fd);
 #endif /* UTIL_H */
@@ -33,7 +33,7 @@
 
 static unsigned int port = DEFAULT_PORT;
 static unsigned long buf_size_bytes = DEFAULT_BUF_SIZE_BYTES;
-static unsigned long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES;
+static unsigned long long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES;
 static bool zerocopy;
 
 static void error(const char *s)
@@ -133,7 +133,7 @@ static float get_gbps(unsigned long bits, time_t ns_delta)
	       ((float)ns_delta / NSEC_PER_SEC);
 }
 
-static void run_receiver(unsigned long rcvlowat_bytes)
+static void run_receiver(int rcvlowat_bytes)
 {
 	unsigned int read_cnt;
 	time_t rx_begin_ns;
@@ -162,8 +162,8 @@ static void run_receiver(unsigned long rcvlowat_bytes)
 	printf("Run as receiver\n");
 	printf("Listen port %u\n", port);
 	printf("RX buffer %lu bytes\n", buf_size_bytes);
-	printf("vsock buffer %lu bytes\n", vsock_buf_bytes);
-	printf("SO_RCVLOWAT %lu bytes\n", rcvlowat_bytes);
+	printf("vsock buffer %llu bytes\n", vsock_buf_bytes);
+	printf("SO_RCVLOWAT %d bytes\n", rcvlowat_bytes);
 
 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 
@@ -251,6 +251,16 @@
 	close(fd);
 }
 
+static void enable_so_zerocopy(int fd)
+{
+	int val = 1;
+
+	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &val, sizeof(val))) {
+		perror("setsockopt");
+		exit(EXIT_FAILURE);
+	}
+}
+
 static void run_sender(int peer_cid, unsigned long to_send_bytes)
 {
 	time_t tx_begin_ns;
@@ -439,7 +449,7 @@ static long strtolx(const char *arg)
 int main(int argc, char **argv)
 {
 	unsigned long to_send_bytes = DEFAULT_TO_SEND_BYTES;
-	unsigned long rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES;
+	int rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES;
 	int peer_cid = -1;
 	bool sender = false;
 
@@ -429,7 +429,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
 
 static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
 {
-	unsigned long sock_buf_size;
+	unsigned long long sock_buf_size;
 	unsigned long remote_hash;
 	unsigned long curr_hash;
 	int fd;
@@ -444,17 +444,13 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
 
 	sock_buf_size = SOCK_BUF_SIZE;
 
-	if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
-		       &sock_buf_size, sizeof(sock_buf_size))) {
-		perror("setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
-		exit(EXIT_FAILURE);
-	}
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
 
-	if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
-		       &sock_buf_size, sizeof(sock_buf_size))) {
-		perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
-		exit(EXIT_FAILURE);
-	}
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
 
 	/* Ready to receive data. */
 	control_writeln("SRVREADY");
@@ -586,10 +582,8 @@ static void test_seqpacket_timeout_client(const struct test_opts *opts)
 	tv.tv_sec = RCVTIMEO_TIMEOUT_SEC;
 	tv.tv_usec = 0;
 
-	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv)) == -1) {
-		perror("setsockopt(SO_RCVTIMEO)");
-		exit(EXIT_FAILURE);
-	}
+	setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv,
				 "setsockopt(SO_RCVTIMEO)");
 
 	read_enter_ns = current_nsec();
 
@@ -634,7 +628,8 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts)
 
 static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
 {
-	unsigned long sock_buf_size;
+	unsigned long long sock_buf_size;
+	size_t buf_size;
 	socklen_t len;
 	void *data;
 	int fd;
@@ -655,13 +650,20 @@ static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
 
 	sock_buf_size++;
 
-	data = malloc(sock_buf_size);
+	/* size_t can be < unsigned long long */
+	buf_size = (size_t)sock_buf_size;
+	if (buf_size != sock_buf_size) {
+		fprintf(stderr, "Returned BUFFER_SIZE too large\n");
+		exit(EXIT_FAILURE);
+	}
+
+	data = malloc(buf_size);
 	if (!data) {
 		perror("malloc");
 		exit(EXIT_FAILURE);
 	}
 
-	send_buf(fd, data, sock_buf_size, 0, -EMSGSIZE);
+	send_buf(fd, data, buf_size, 0, -EMSGSIZE);
 
 	control_writeln("CLISENT");
 
@@ -835,7 +837,7 @@ static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
 
 static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
 {
-	unsigned long lowat_val = RCVLOWAT_BUF_SIZE;
+	int lowat_val = RCVLOWAT_BUF_SIZE;
 	char buf[RCVLOWAT_BUF_SIZE];
 	struct pollfd fds;
 	short poll_flags;
@@ -847,11 +849,8 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
 		exit(EXIT_FAILURE);
 	}
 
-	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
-		       &lowat_val, sizeof(lowat_val))) {
-		perror("setsockopt(SO_RCVLOWAT)");
-		exit(EXIT_FAILURE);
-	}
+	setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
			     lowat_val, "setsockopt(SO_RCVLOWAT)");
 
 	control_expectln("SRVSENT");
 
@@ -1357,9 +1356,10 @@ static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opt
 static void test_stream_credit_update_test(const struct test_opts *opts,
					   bool low_rx_bytes_test)
 {
-	size_t recv_buf_size;
+	int recv_buf_size;
 	struct pollfd fds;
 	size_t buf_size;
+	unsigned long long sock_buf_size;
 	void *buf;
 	int fd;
 
@@ -1371,11 +1371,12 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
 
 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;
 
-	if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
-		       &buf_size, sizeof(buf_size))) {
-		perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
-		exit(EXIT_FAILURE);
-	}
+	/* size_t can be < unsigned long long */
+	sock_buf_size = buf_size;
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
 
 	if (low_rx_bytes_test) {
 		/* Set new SO_RCVLOWAT here. This enables sending credit
@@ -1384,11 +1385,8 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
		 */
 		recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
 
-		if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
-			       &recv_buf_size, sizeof(recv_buf_size))) {
-			perror("setsockopt(SO_RCVLOWAT)");
-			exit(EXIT_FAILURE);
-		}
+		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
 	}
 
 	/* Send one dummy byte here, because 'setsockopt()' above also
@@ -1430,11 +1428,8 @@ static void test_stream_credit_update_test(const struct test_opts *opts,
 		recv_buf_size++;
 
 		/* Updating SO_RCVLOWAT will send credit update. */
-		if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
-			       &recv_buf_size, sizeof(recv_buf_size))) {
-			perror("setsockopt(SO_RCVLOWAT)");
-			exit(EXIT_FAILURE);
-		}
+		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
 	}
 
 	fds.fd = fd;
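Several of the vsock hunks above funnel an unsigned long long byte count into size_t only after a cast-and-compare truncation check, since size_t may be narrower (e.g. 32-bit userspace). The idiom in isolation, runnable as-is (the helper name is invented):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t checked_size(unsigned long long v)
{
	size_t s = (size_t)v;

	/* if the cast lost bits, the round-trip comparison fails */
	if (s != v) {
		fprintf(stderr, "value %llu overflows size_t\n", v);
		exit(EXIT_FAILURE);
	}
	return s;
}

int main(void)
{
	printf("%zu\n", checked_size(4096ULL));
	return 0;
}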
@@ -162,7 +162,7 @@ static void test_client(const struct test_opts *opts,
 	}
 
 	if (test_data->so_zerocopy)
-		enable_so_zerocopy(fd);
+		enable_so_zerocopy_check(fd);
 
 	iovec = alloc_test_iovec(test_data->vecs, test_data->vecs_cnt);
 
@@ -73,7 +73,7 @@ static void vsock_io_uring_client(const struct test_opts *opts,
 	}
 
 	if (msg_zerocopy)
-		enable_so_zerocopy(fd);
+		enable_so_zerocopy_check(fd);
 
 	iovec = alloc_test_iovec(test_data->vecs, test_data->vecs_cnt);
 