Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net into master
Pull networking fixes from David Miller:

 1) Fix RCU locking in iwlwifi, from Johannes Berg.
 2) mt76 can access uninitialized NAPI struct, from Felix Fietkau.
 3) Fix race in updating pause settings in bnxt_en, from Vasundhara Volam.
 4) Propagate error return properly during unbind failures in ax88172a, from George Kennedy.
 5) Fix memleak in adf7242_probe, from Liu Jian.
 6) smc_drv_probe() can leak, from Wang Hai.
 7) Don't muck with the carrier state if register_netdevice() fails in the bonding driver, from Taehee Yoo.
 8) Fix memleak in dpaa_eth_probe, from Liu Jian.
 9) Need to check skb_put_padto() return value in hsr_fill_tag(), from Murali Karicheri.
10) Don't lose ionic RSS hash settings across FW update, from Shannon Nelson.
11) Fix clobbered SKB control block in act_ct, from Wen Xu.
12) Missing newline in "tx_timeout" sysfs output, from Xiongfeng Wang.
13) An IS_UDPLITE cleanup a long time ago incorrectly handled transformations involving UDPLITE_RECV_CC. From Miaohe Lin.
14) Unbalanced locking in netdevsim, from Taehee Yoo.
15) Suppress false-positive error messages in qed driver, from Alexander Lobakin.
16) Out-of-bounds read in ax25_connect and ax25_sendmsg, from Peilin Ye.
17) Missing SKB release in cxgb4's uld_send(), from Navid Emamdoost.
18) Uninitialized value in geneve_changelink(), from Cong Wang.
19) Fix deadlock in xen-netfront, from Andrea Righi.
20) flush_backlog() frees skbs with IRQs disabled, so it should use dev_kfree_skb_irq() instead of kfree_skb(). From Subash Abhinov Kasiviswanathan.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (111 commits)
  drivers/net/wan: lapb: Corrected the usage of skb_cow
  dev: Defer free of skbs in flush_backlog
  qrtr: orphan socket in qrtr_release()
  xen-netfront: fix potential deadlock in xennet_remove()
  flow_offload: Move rhashtable inclusion to the source file
  geneve: fix an uninitialized value in geneve_changelink()
  bonding: check return value of register_netdevice() in bond_newlink()
  tcp: allow at most one TLP probe per flight
  AX.25: Prevent integer overflows in connect and sendmsg
  cxgb4: add missing release on skb in uld_send()
  net: atlantic: fix PTP on AQC10X
  AX.25: Prevent out-of-bounds read in ax25_sendmsg()
  sctp: shrink stream outq when fails to do addstream reconf
  sctp: shrink stream outq only when new outcnt < old outcnt
  AX.25: Fix out-of-bounds read in ax25_connect()
  enetc: Remove the mdio bus on PF probe bailout
  net: ethernet: ti: add NETIF_F_HW_TC hw feature flag for taprio offload
  net: ethernet: ave: Fix error returns in ave_init
  drivers/net/wan/x25_asy: Fix to make it work
  ipvs: fix the connection sync failed in some cases
  ...
commit 1b64b2e244
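A recurring theme in this batch (items 9, 17 and 20) is skb lifetime handling. Item 20 in particular rests on a context rule worth spelling out: with IRQs disabled, an skb must not be freed synchronously. A minimal sketch of the pattern, generic and not the patch itself:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical helper: drop an skb from a section running with IRQs
     * disabled. kfree_skb() is unsafe here; dev_kfree_skb_irq() queues
     * the skb on the per-CPU completion list and raises NET_TX_SOFTIRQ,
     * so the actual free happens later in softirq context. */
    static void drop_skb_atomic(struct sk_buff *skb)
    {
            dev_kfree_skb_irq(skb);
    }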
@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux
+ Ancillary clock features
- Time stamp external events
- Period output signals configurable from user space
- Low Pass Filter (LPF) access from user space
- Synchronization of the Linux system time via the PPS subsystem

PTP hardware clock kernel API

@ -94,3 +95,14 @@ Supported hardware

- Auxiliary Slave/Master Mode Snapshot (optional interrupt)
- Target Time (optional interrupt)

* Renesas (IDT) ClockMatrix™

- Up to 4 independent PHC channels
- Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2)
- Programmable output periodic signals
- Programmable inputs can time stamp external triggers
- Driver and/or hardware configuration through firmware (idtcm.bin)
- LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2))
- Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs)
- Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
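The features above are reached from user space through the PHC character device and the POSIX clock API. A minimal sketch, assuming the clock is exposed as /dev/ptp0 (the device path and reported capabilities vary by driver):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <time.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>

    /* Standard mapping from a PHC file descriptor to a clockid. */
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

    int main(void)
    {
            struct ptp_clock_caps caps;
            struct timespec ts;
            int fd = open("/dev/ptp0", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/ptp0");
                    return 1;
            }
            if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps) == 0)
                    printf("pins: %d pps: %d\n", caps.n_pins, caps.pps);
            if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
                    printf("phc time: %lld.%09ld\n",
                           (long long)ts.tv_sec, ts.tv_nsec);
            close(fd);
            return 0;
    }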
@ -26,7 +26,7 @@ Usage

1) Device creation & deletion

a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc

This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
0x8847 (MPLS traffic). The destination port of the UDP header will be set to

@ -34,14 +34,21 @@ Usage

b) ip link delete bareudp0

2) Device creation with multiple proto mode enabled
2) Device creation with multiproto mode enabled

There are two ways to create a bareudp device for MPLS & IP with multiproto mode
enabled.
The multiproto mode allows bareudp tunnels to handle several protocols of the
same family. It is currently only available for IP and MPLS. This mode has to
be enabled explicitly with the "multiproto" flag.

a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto

b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
For an IPv4 tunnel the multiproto mode allows the tunnel to also handle
IPv6.

b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto

For MPLS, the multiproto mode allows the tunnel to handle both unicast
and multicast MPLS packets.

3) Device Usage
@ -454,10 +454,7 @@ &cp1_eth2 {
status = "okay";
phy-mode = "2500base-x";
phys = <&cp1_comphy5 2>;
fixed-link {
speed = <2500>;
full-duplex;
};
managed = "in-band-status";
};

&cp1_spi1 {
@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
case PF_INET:
if (likely(!inet_sk(sk)->inet_rcv_saddr))
return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&record_type);
if (err)
goto out_err;

/* Avoid appending tls handshake, alert to tls data */
if (skb)
tx_skb_finalize(skb);
}

recordsz = size;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = record_type;

if (skb)
ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}

if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;

res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
rtnl_unlock();

return res;
}

netif_carrier_off(bond_dev);

bond_work_init_all(bond);

rtnl_unlock();
if (res < 0)
free_netdev(bond_dev);
return res;
return 0;
}

static int __net_init bond_net_init(struct net *net)
@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;

err = register_netdevice(bond_dev);

netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);

netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}
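Both bonding hunks converge on one ordering rule: touch carrier state and start auxiliary work only after register_netdevice() has succeeded, and leave the device untouched on failure. A condensed sketch of the corrected flow (generic driver shape, not the bonding code itself):

    #include <linux/netdevice.h>

    /* Hypothetical newlink path: registration failure frees the device
     * without any carrier manipulation; setup runs only on success. */
    static int example_newlink(struct net_device *dev)
    {
            int err = register_netdevice(dev);

            if (err < 0) {
                    free_netdev(dev);       /* nothing else to undo */
                    return err;
            }
            netif_carrier_off(dev);         /* start with carrier down */
            return 0;
    }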
@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}

static void ksz9477_phy_setup(struct ksz_device *dev, int port,
struct phy_device *phy)
{
/* Only apply to port with PHY. */
if (port >= dev->phy_port_cnt)
return;

/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phy,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}

static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
{
bool gbit;

@ -1603,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.phy_setup = ksz9477_phy_setup,
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,

@ -1617,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {

int ksz9477_switch_register(struct ksz_device *dev)
{
return ksz_switch_register(dev, &ksz9477_dev_ops);
int ret, i;
struct phy_device *phydev;

ret = ksz_switch_register(dev, &ksz9477_dev_ops);
if (ret)
return ret;

for (i = 0; i < dev->phy_port_cnt; ++i) {
if (!dsa_is_user_port(dev->ds, i))
continue;

phydev = dsa_to_port(dev->ds, i)->slave->phydev;

/* The MAC actually cannot run in 1000 half-duplex mode. */
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

/* PHY does not support gigabit. */
if (!(dev->features & GBIT_SUPPORT))
phy_remove_link_mode(phydev,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
}
return ret;
}
EXPORT_SYMBOL(ksz9477_switch_register);
@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)

/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
if (dev->dev_ops->phy_setup)
dev->dev_ops->phy_setup(dev, port, phy);

/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
@ -119,8 +119,6 @@ struct ksz_dev_ops {
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*phy_setup)(struct ksz_device *dev, int port,
struct phy_device *phy);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p;
int err;

p = &chip->ports[port];

/* FIXME: is this the correct test? If we're in fixed mode on an
* internal port, why should we process this any different from
* PHY mode? On the other hand, the port may be automedia between

@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
return;

mv88e6xxx_reg_lock(chip);
/* FIXME: should we force the link down here - but if we do, how
* do we restore the link force/unforce state? The driver layering
* gets in the way.
/* In inband mode, the link may come up at any time while the link
* is not forced down. Force the link down while we reconfigure the
* interface mode.
*/
if (mode == MLO_AN_INBAND && p->interface != state->interface &&
chip->info->ops->port_set_link)
chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);

err = mv88e6xxx_port_config_interface(chip, port, state->interface);
if (err && err != -EOPNOTSUPP)
goto err_unlock;

@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
if (err > 0)
err = 0;

/* Undo the forced down state above after completing configuration
* irrespective of its state on entry, which allows the link to come up.
*/
if (mode == MLO_AN_INBAND && p->interface != state->interface &&
chip->info->ops->port_set_link)
chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);

p->interface = state->interface;

err_unlock:
mv88e6xxx_reg_unlock(chip);

@ -232,6 +232,7 @@ struct mv88e6xxx_port {
u64 atu_full_violation;
u64 vtu_member_violation;
u64 vtu_miss_violation;
phy_interface_t interface;
u8 cmode;
bool mirror_ingress;
bool mirror_egress;
@ -64,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
u32 quirks;
u32 priv_data_len;
};

@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self)
self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);

/* Disable the PTP on NICs where it's known to cause datapath
* problems.
* Ideally this should have been done by PHY provisioning, but
* many units have been shipped with enabled PTP block already.
*/
if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
aq_phy_disable_ptp(self->aq_hw);
}

for (i = 0U; i < self->aq_vecs; i++) {

@ -81,6 +81,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U

#define AQ_NIC_QUIRK_BAD_PTP BIT(0)

#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)

@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2018-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/

#include "aq_phy.h"

#define HW_ATL_PTP_DISABLE_MSK BIT(10)

bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
int err = 0;

@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw)

return true;
}

void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
{
static const u16 ptp_registers[] = {
0x031e,
0x031d,
0x031c,
0x031b,
};
u16 val;
int i;

for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
ptp_registers[i]);

aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
ptp_registers[i],
val & ~HW_ATL_PTP_DISABLE_MSK);
}
}

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* aQuantia Corporation Network Driver
* Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2018-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/

#ifndef AQ_PHY_H

@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);

bool aq_phy_init(struct aq_hw_s *aq_hw);

void aq_phy_disable_ptp(struct aq_hw_s *aq_hw);

#endif /* AQ_PHY_H */

@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
.quirks = AQ_NIC_QUIRK_BAD_PTP,
};

const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M,
.quirks = AQ_NIC_QUIRK_BAD_PTP,
};

static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;

@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)

/* WSP, if min_rate is set for at least one TC.
* RR otherwise.
*
* NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
* overwrite it here in that case.
*/
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
if (!nic_cfg->is_ptp)
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);

/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/

@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;

#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112;

#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109

#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112

extern const struct aq_hw_ops hw_atl_ops_b0;
@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
return PTR_ERR(ag->mdio_reset);
err = PTR_ERR(ag->mdio_reset);
goto mdio_err_put_clk;
}

mii_bus->name = "ag71xx_mdio";
@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
u32 ring_size, rx_size, rx_space;
u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;

/* 8 for CRC and VLAN */

@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
max_rx_cmpl = bp->rx_ring_size;
/* MAX TPA needs to be added because TPA_START completions are
* immediately recycled, so the TPA completions are not bound by
* the RX ring size.
*/
if (bp->flags & BNXT_FLAG_TPA)
max_rx_cmpl += bp->max_tpa;
/* RX and TPA completions are 32-byte, all others are 16-byte */
ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;

bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);

@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);

if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
&bp->sp_event))
bnxt_init_ethtool_link_settings(bp);

rc = bnxt_update_link(bp, true);
mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);

if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
&bp->sp_event))
bnxt_init_ethtool_link_settings(bp);
mutex_unlock(&bp->link_lock);
}
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;
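The completion-ring sizing above is easy to sanity-check with toy numbers. A standalone sketch of the same arithmetic (values illustrative, not taken from real hardware):

    /* RX and TPA completions are 32 bytes, i.e. two 16-byte slots each,
     * hence the factor of two on max_rx_cmpl. */
    static unsigned int cp_entries(unsigned int rx_ring_size,
                                   unsigned int max_tpa, int tpa_enabled,
                                   unsigned int agg_ring_size,
                                   unsigned int tx_ring_size)
    {
            unsigned int max_rx_cmpl = rx_ring_size;

            if (tpa_enabled)
                    max_rx_cmpl += max_tpa; /* TPA_START recycles early */
            return max_rx_cmpl * 2 + agg_ring_size + tx_ring_size;
    }

With rx_ring_size = 511, max_tpa = 64 and TPA enabled, the RX side alone contributes (511 + 64) * 2 = 1150 entries before the aggregation and TX rings are added.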
@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

if (netif_running(dev))
if (netif_running(dev)) {
mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
mutex_unlock(&bp->link_lock);
}
return rc;
}
@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
#define VALIDATE_MASK(x) \
bcmgenet_hfb_validate_mask(&(x), sizeof(x))

static int bcmgenet_hfb_insert_data(u32 *f, int offset,
void *val, void *mask, size_t size)
static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
u32 offset, void *val, void *mask,
size_t size)
{
int index;
u32 tmp;
u32 index, tmp;

index = offset / 2;
tmp = f[index];
index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

while (size--) {
if (offset++ & 1) {

@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
tmp |= 0x10000;
break;
}
f[index++] = tmp;
bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
if (size)
tmp = f[index];
tmp = bcmgenet_hfb_readl(priv,
index * sizeof(u32));
} else {
tmp &= ~0xCFF00;
tmp |= (*(unsigned char *)val++) << 8;

@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
break;
}
if (!size)
f[index] = tmp;
bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
}
}

return 0;
}

static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
u32 f_length, u32 rx_queue, int f_index)
{
u32 base = f_index * priv->hw_params->hfb_filter_size;
int i;

for (i = 0; i < f_length; i++)
bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));

bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
}

static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
int err = 0, offset = 0, f_length = 0;
u32 offset = 0, f_length = 0, f;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
u32 *f_data;

f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
GFP_KERNEL);
if (!f_data)
return -ENOMEM;

f = fs->location;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(f_data, 0,
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
sizeof(fs->h_ext.h_dest));
}

@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
if (fs->flow_type & FLOW_EXT) {
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci) {
bcmgenet_hfb_insert_data(f_data, 12,
bcmgenet_hfb_insert_data(priv, f, 12,
&fs->h_ext.vlan_etype,
&fs->m_ext.vlan_etype,
sizeof(fs->h_ext.vlan_etype));
bcmgenet_hfb_insert_data(f_data, 14,
bcmgenet_hfb_insert_data(priv, f, 14,
&fs->h_ext.vlan_tci,
&fs->m_ext.vlan_tci,
sizeof(fs->h_ext.vlan_tci));

@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
f_length += DIV_ROUND_UP(ETH_HLEN, 2);
bcmgenet_hfb_insert_data(f_data, 0,
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_u.ether_spec.h_dest,
&fs->m_u.ether_spec.h_dest,
sizeof(fs->h_u.ether_spec.h_dest));
bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
&fs->h_u.ether_spec.h_source,
&fs->m_u.ether_spec.h_source,
sizeof(fs->h_u.ether_spec.h_source));
bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&fs->h_u.ether_spec.h_proto,
&fs->m_u.ether_spec.h_proto,
sizeof(fs->h_u.ether_spec.h_proto));

@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Specify IP Ether Type */
val_16 = htons(ETH_P_IP);
mask_16 = 0xFFFF;
bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&val_16, &mask_16, sizeof(val_16));
bcmgenet_hfb_insert_data(f_data, 15 + offset,
bcmgenet_hfb_insert_data(priv, f, 15 + offset,
&fs->h_u.usr_ip4_spec.tos,
&fs->m_u.usr_ip4_spec.tos,
sizeof(fs->h_u.usr_ip4_spec.tos));
bcmgenet_hfb_insert_data(f_data, 23 + offset,
bcmgenet_hfb_insert_data(priv, f, 23 + offset,
&fs->h_u.usr_ip4_spec.proto,
&fs->m_u.usr_ip4_spec.proto,
sizeof(fs->h_u.usr_ip4_spec.proto));
bcmgenet_hfb_insert_data(f_data, 26 + offset,
bcmgenet_hfb_insert_data(priv, f, 26 + offset,
&fs->h_u.usr_ip4_spec.ip4src,
&fs->m_u.usr_ip4_spec.ip4src,
sizeof(fs->h_u.usr_ip4_spec.ip4src));
bcmgenet_hfb_insert_data(f_data, 30 + offset,
bcmgenet_hfb_insert_data(priv, f, 30 + offset,
&fs->h_u.usr_ip4_spec.ip4dst,
&fs->m_u.usr_ip4_spec.ip4dst,
sizeof(fs->h_u.usr_ip4_spec.ip4dst));

@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Only supports 20 byte IPv4 header */
val_8 = 0x45;
mask_8 = 0xFF;
bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
&val_8, &mask_8,
sizeof(val_8));
size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
bcmgenet_hfb_insert_data(f_data,
bcmgenet_hfb_insert_data(priv, f,
ETH_HLEN + 20 + offset,
&fs->h_u.usr_ip4_spec.l4_4_bytes,
&fs->m_u.usr_ip4_spec.l4_4_bytes,

@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
break;
}

bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
/* Ring 0 flows can be handled by the default Descriptor Ring
* We'll map them to ring 0, but don't enable the filter
*/
bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
fs->location);
bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
rule->state = BCMGENET_RXNFC_STATE_DISABLED;
} else {
/* Other Rx rings are direct mapped here */
bcmgenet_hfb_set_filter(priv, f_data, f_length,
fs->ring_cookie, fs->location);
bcmgenet_hfb_enable_filter(priv, fs->location);
bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
fs->ring_cookie);
bcmgenet_hfb_enable_filter(priv, f);
rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}

kfree(f_data);

return err;
}

/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
*/
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;

base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;

if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
return;

bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
bcmgenet_hfb_reg_writel(priv, 0x0,
HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

for (i = 0; i < priv->hw_params->hfb_filter_cnt *
priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
int i;

INIT_LIST_HEAD(&priv->rxnfc_list);
if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
return;

INIT_LIST_HEAD(&priv->rxnfc_list);
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));

err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
if (err) {
netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
err);
return err;
}
bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

list_add_tail(&loc_rule->list, &priv->rxnfc_list);

@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,

if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
goto err;
goto err_clk_disable;

/* Mii wait queue */
init_waitqueue_head(&priv->wq);

@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
err = PTR_ERR(priv->clk_wol);
goto err;
goto err_clk_disable;
}

priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
err = PTR_ERR(priv->clk_eee);
goto err;
goto err_clk_disable;
}

/* If this is an internal GPHY, power it on now, before UniMAC is

@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
u32 offset, reg;
u32 reg;
int ret;

if (!netif_running(dev))

@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d)

bcmgenet_set_hw_addr(priv, dev->dev_addr);

offset = HFB_FLT_ENABLE_V3PLUS;
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
/* Restore hardware filters */
bcmgenet_hfb_clear(priv);
list_for_each_entry(rule, &priv->rxnfc_list, list)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);

if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 offset;

if (!netif_running(dev))
return 0;

@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);

/* Preserve filter state and disable filtering */
priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
offset = HFB_FLT_ENABLE_V3PLUS;
priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
/* Disable filtering */
bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);

return 0;

@ -696,7 +696,6 @@ struct bcmgenet_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
bool wol_active;
u32 hfb_en[3];

struct bcmgenet_mib_counters mib;
@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,

priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;

/* Disable Magic Packet Detection */
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
return; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}

/* Disable WAKE_FILTER Detection */
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
if (priv->wolopts & WAKE_FILTER) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
if (!(reg & RBUF_ACPI_EN))
return; /* already reset so skip the rest */
reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}

/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->crc_fwd_en = 0;
}
@ -3736,7 +3736,7 @@ static int macb_init(struct platform_device *pdev)

if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
if (phy_interface_mode_is_rgmii(bp->phy_interface))
val = GEM_BIT(RGMII);
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
txq_info = adap->sge.uld_txq_info[tx_uld_type];
if (unlikely(!txq_info)) {
WARN_ON(true);
kfree_skb(skb);
return NET_XMIT_DROP;
}
@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
DMA_BIT_MASK(40));
if (err) {
netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
return err;
goto free_netdev;
}

/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)

dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;

if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
return 0;

err_reg_netdev:
enetc_mdio_remove(pf);
enetc_of_put_phy(priv);
enetc_free_msix(priv);
err_alloc_msix:
@ -590,6 +590,7 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);

@ -1294,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
ndev->stats.tx_bytes += skb->len;
}

if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
fep->bufdesc_ex) {
/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
* are to time stamp the packet, so we still need to check time
* stamping enabled flag.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
fep->hwts_tx_en) &&
fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

@ -2723,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return -ENODEV;

if (fep->bufdesc_ex) {
if (cmd == SIOCSHWTSTAMP)
return fec_ptp_set(ndev, rq);
if (cmd == SIOCGHWTSTAMP)
return fec_ptp_get(ndev, rq);
bool use_fec_hwts = !phy_has_hwtstamp(phydev);

if (cmd == SIOCSHWTSTAMP) {
if (use_fec_hwts)
return fec_ptp_set(ndev, rq);
fec_ptp_disable_hwts(ndev);
} else if (cmd == SIOCGHWTSTAMP) {
if (use_fec_hwts)
return fec_ptp_get(ndev, rq);
}
}

return phy_mii_ioctl(phydev, rq, cmd);
@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}

/**
* fec_ptp_disable_hwts - disable hardware time stamping
* @ndev: pointer to net_device
*/
void fec_ptp_disable_hwts(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);

fep->hwts_tx_en = 0;
fep->hwts_rx_en = 0;
}

int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
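The fec ioctl rework above hinges on one predicate: when the attached PHY can timestamp, the MAC timestamper is switched off so the two never stamp the same packet. A condensed sketch of that decision (helper names taken from the hunks, the surrounding driver plumbing elided):

    /* use_fec_hwts selects the MAC timestamper only when the PHY offers
     * none; otherwise the MAC unit is muted and the PHY handles it. */
    static int example_hwtstamp_set(struct net_device *ndev,
                                    struct ifreq *rq,
                                    struct phy_device *phydev)
    {
            bool use_fec_hwts = !phy_has_hwtstamp(phydev);

            if (use_fec_hwts)
                    return fec_ptp_set(ndev, rq);   /* MAC stamps */
            fec_ptp_disable_hwts(ndev);             /* PHY stamps */
            return phy_mii_ioctl(phydev, rq, SIOCSHWTSTAMP);
    }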
@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)

mac_addr = of_get_mac_address(np);

if (!IS_ERR(mac_addr))
if (!IS_ERR(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
} else {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
}

if (model && !strcasecmp(model, "TSEC"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
@ -77,6 +77,7 @@
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)

enum hns_desc_type {
DESC_TYPE_UNKNOWN,
DESC_TYPE_SKB,
DESC_TYPE_FRAGLIST_SKB,
DESC_TYPE_PAGE,

@ -1118,12 +1118,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}

desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
desc_cb->type = type;

if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc_cb->priv = priv;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
desc->tx.bdtp_fe_sc_vld_ra_ri =

@ -1135,18 +1135,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
}

frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = size % HNS3_MAX_BD_SIZE;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
type == DESC_TYPE_SKB) && !k) ?
type : DESC_TYPE_PAGE;

/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?

@ -1158,7 +1151,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);

desc_cb = &ring->desc_cb[ring->next_to_use];
desc = &ring->desc[ring->next_to_use];
}

@ -1346,6 +1338,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
unsigned int i;

for (i = 0; i < ring->desc_num; i++) {
struct hns3_desc *desc = &ring->desc[ring->next_to_use];

memset(desc, 0, sizeof(*desc));

/* check if this is where we started */
if (ring->next_to_use == next_to_use_orig)
break;

@ -1353,6 +1349,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);

if (!ring->desc_cb[ring->next_to_use].dma)
continue;

/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
ring->desc_cb[ring->next_to_use].type ==

@ -1369,6 +1368,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)

ring->desc_cb[ring->next_to_use].length = 0;
ring->desc_cb[ring->next_to_use].dma = 0;
ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
}
}

@ -165,8 +165,6 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_TX_LAST_SIZE_M 0xffff

#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
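The sizeoflast fix above swaps a bit mask for a modulo, which matters because a mask only computes a remainder for power-of-two divisors, and the BD limit is not one. A toy version of the split (the 8192-byte limit is assumed for the example; the real limit differs):

    #define EXAMPLE_MAX_BD_SIZE 8192U       /* assumed, for illustration */

    /* Length of the final buffer descriptor after splitting a frag. */
    static unsigned int last_bd_len(unsigned int size)
    {
            unsigned int rem = size % EXAMPLE_MAX_BD_SIZE;

            return rem ? rem : EXAMPLE_MAX_BD_SIZE; /* exact multiple */
    }

A 10000-byte frag yields descriptors of 8192 and 1808 bytes; a 16384-byte frag yields 8192 and 8192.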
@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
delay_time);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
struct hclge_link_status_cmd *req;
struct hclge_desc desc;
int link_status;
int ret;

hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);

@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}

req = (struct hclge_link_status_cmd *)desc.data;
link_status = req->status & HCLGE_LINK_STATUS_UP_M;
*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

return !!link_status;
return 0;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
unsigned int mac_state;
int link_stat;
struct phy_device *phydev = hdev->hw.mac.phydev;

*link_status = HCLGE_LINK_STATUS_DOWN;

if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
return 0;

mac_state = hclge_get_mac_link_status(hdev);
if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
return 0;

if (hdev->hw.mac.phydev) {
if (hdev->hw.mac.phydev->state == PHY_RUNNING)
link_stat = mac_state &
hdev->hw.mac.phydev->link;
else
link_stat = 0;

} else {
link_stat = mac_state;
}

return !!link_stat;
return hclge_get_mac_link_status(hdev, link_status);
}

static void hclge_update_link_status(struct hclge_dev *hdev)

@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
struct hnae3_handle *rhandle;
struct hnae3_handle *handle;
int state;
int ret;
int i;

if (!client)

@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
return;

state = hclge_get_mac_phy_link(hdev);
ret = hclge_get_mac_phy_link(hdev, &state);
if (ret) {
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
return;
}

if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;

@ -6524,14 +6521,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM 100

int link_status;
int i = 0;
int ret;

do {
ret = hclge_get_mac_link_status(hdev);
if (ret < 0)
ret = hclge_get_mac_link_status(hdev, &link_status);
if (ret)
return ret;
else if (ret == link_ret)
if (link_status == link_ret)
return 0;

msleep(HCLGE_LINK_STATUS_MS);

@ -6542,9 +6540,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
#define HCLGE_LINK_STATUS_DOWN 0
#define HCLGE_LINK_STATUS_UP 1

int link_ret;

link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

@ -317,6 +317,9 @@ enum hclge_link_fail_code {
HCLGE_LF_XSFP_ABSENT,
};

#define HCLGE_LINK_STATUS_DOWN 0
#define HCLGE_LINK_STATUS_UP 1

#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
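The hclge refactor untangles two things the old return value conflated: whether the firmware query itself worked, and what the link state is. A sketch of the resulting convention (names hypothetical):

    /* Return 0 or -errno for the transport; deliver the answer through
     * the out-parameter, which is only meaningful on a 0 return. */
    static int example_query_link(int *link_status)
    {
            int fw_err = 0;         /* stand-in for the mailbox read */

            if (fw_err)
                    return fw_err;
            *link_status = 1;       /* e.g. HCLGE_LINK_STATUS_UP */
            return 0;
    }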
@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
return err;
goto err_trap_register;

err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)

@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}

@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
static int
mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
u16 offset, u16 size, void *data,
unsigned int *p_read_size)
bool qsfp, unsigned int *p_read_size)
{
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];

@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
int status;
int err;

/* MCIA register accepts buffer size <= 48. Page of size 128 should be
* read by chunks of size 48, 48, 32. Align the size of the last chunk
* to avoid reading after the end of the page.
*/
size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);

if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&

@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,

i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
page = MLXSW_REG_MCIA_PAGE_GET(offset);
offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
/* When reading upper pages 1, 2 and 3 the offset starts at
* 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436
* specification for graphical depiction.
* MCIA register accepts buffer size <= 48. Page of size 128
* should be read by chunks of size 48, 48, 32. Align the size
* of the last chunk to avoid reading after the end of the
* page.
*/
if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
if (qsfp) {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 128. Please refer to "QSFP+ Memory Map"
* figure in SFF-8436 specification for graphical
* depiction.
*/
page = MLXSW_REG_MCIA_PAGE_GET(offset);
offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please
* refer to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
}
}

mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);

@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
int err;

err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
module_info, &read_size);
module_info, false, &read_size);
if (err)
return err;

@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
/* Verify if transceiver provides diagnostic monitoring page */
err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
SFP_DIAGMON, 1, &diag_mon,
&read_size);
false, &read_size);
if (err)
return err;

@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
int offset = ee->offset;
unsigned int read_size;
int i = 0;
bool qsfp;
int err;

if (!ee->len)
return -EINVAL;

memset(data, 0, ee->len);
/* Validate module identifier value. */
err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp);
if (err)
return err;

while (i < ee->len) {
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
ee->len - i, data + i,
&read_size);
qsfp, &read_size);
if (err) {
netdev_err(netdev, "Eeprom query failed\n");
return err;
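The MCIA constraint described in the comments — a 128-byte page read through register transactions of at most 48 bytes — reduces to a short chunking loop. A standalone sketch (constants taken from the comment, not from the register definitions):

    #define EXAMPLE_PAGE_LEN  128U
    #define EXAMPLE_CHUNK_MAX  48U

    /* Visits offsets 0, 48 and 96 with lengths 48, 48 and 32, never
     * reading past the end of the page. */
    static void plan_page_reads(void)
    {
            unsigned int off, len;

            for (off = 0; off < EXAMPLE_PAGE_LEN; off += len) {
                    len = EXAMPLE_PAGE_LEN - off;
                    if (len > EXAMPLE_CHUNK_MAX)
                            len = EXAMPLE_CHUNK_MAX;
                    /* issue_mcia_read(off, len); -- hypothetical */
            }
    }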
@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
struct sk_buff **skb_ptr = NULL;
struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
#define NR_SKB_COMPLETED 16
struct sk_buff *completed[NR_SKB_COMPLETED];
int more;
@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
unsigned int offset;
unsigned int size;

regs->version = IONIC_DEV_CMD_REG_VERSION;

offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);

offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size);
memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
}

static int ionic_get_link_ksettings(struct net_device *netdev,
@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;

if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;

link_status = le16_to_cpu(lif->info->status.link_status);

@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif)
netif_carrier_on(netdev);
}

if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
mutex_lock(&lif->queue_lock);
ionic_start_queues(lif);
mutex_unlock(&lif->queue_lock);
}
} else {
if (netif_carrier_ok(netdev)) {
netdev_info(netdev, "Link down\n");
netif_carrier_off(netdev);
}

if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
mutex_unlock(&lif->queue_lock);
}
}

clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);

@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
if (f)
return 0;

netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
ctx.comp.rx_filter_add.filter_id);
netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);

@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return -ENOENT;
}

netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
addr, f->filter_id);

ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
spin_unlock_bh(&lif->rx_filters.lock);

@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
if (err && err != -EEXIST)
return err;

netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
ctx.cmd.rx_filter_del.filter_id);

return 0;
}

@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
};
int err;

netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;

netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
ctx.comp.rx_filter_add.filter_id);

return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

@ -1382,8 +1384,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
return -ENOENT;
}

netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
vid, f->filter_id);

ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);

@ -1993,16 +1995,13 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
bool running;
int err = 0;

err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;

mutex_lock(&lif->queue_lock);
running = netif_running(lif->netdev);
if (running) {
netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
if (err)
goto reset_out;
return err;
}

if (cb)

@ -2012,9 +2011,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
err = ionic_open(lif->netdev);
netif_device_attach(lif->netdev);
}

reset_out:
clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
mutex_unlock(&lif->queue_lock);

return err;
}

@ -2161,7 +2158,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)

if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
mutex_unlock(&lif->queue_lock);
}

if (netif_running(lif->netdev)) {

@ -2280,15 +2279,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
cancel_work_sync(&lif->deferred.work);
cancel_work_sync(&lif->tx_timeout_work);
ionic_rx_filters_deinit(lif);
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_deinit(lif);
}

if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_deinit(lif);

napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);

mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}

@ -2465,6 +2464,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
return err;

lif->hw_index = le16_to_cpu(comp.hw_index);
mutex_init(&lif->queue_lock);

/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
@@ -135,7 +135,6 @@ enum ionic_lif_state_flags {
     IONIC_LIF_F_SW_DEBUG_STATS,
     IONIC_LIF_F_UP,
     IONIC_LIF_F_LINK_CHECK_REQUESTED,
-    IONIC_LIF_F_QUEUE_RESET,
     IONIC_LIF_F_FW_RESET,

     /* leave this as last */
@@ -165,6 +164,7 @@ struct ionic_lif {
     unsigned int hw_index;
     unsigned int kern_pid;
     u64 __iomem *kern_dbpage;
+    struct mutex queue_lock;    /* lock for queue structures */
     spinlock_t adminq_lock;     /* lock for AdminQ operations */
     struct ionic_qcq *adminqcq;
     struct ionic_qcq *notifyqcq;
@@ -213,12 +213,6 @@ struct ionic_lif {
 #define lif_to_txq(lif, i)  (&lif_to_txqcq((lif), i)->q)
 #define lif_to_rxq(lif, i)  (&lif_to_txqcq((lif), i)->q)

-/* return 0 if successfully set the bit, else non-zero */
-static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
-{
-    return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
-}
-
 static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
 {
     u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
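The ionic hunks above convert a wait_on_bit_lock()-style bit gate (IONIC_LIF_F_QUEUE_RESET) into a plain mutex that serializes every queue start/stop/reset path. A minimal sketch of the resulting pattern, using hypothetical demo_* names rather than the driver's own code:

#include <linux/mutex.h>
#include <linux/netdevice.h>

/* Cut-down lif: one mutex guards every path that starts, stops or
 * rebuilds the queues, replacing the old bit-lock gate.
 */
struct demo_lif {
    struct net_device *netdev;
    struct mutex queue_lock;
};

static void demo_link_up(struct demo_lif *lif)
{
    if (netif_running(lif->netdev)) {
        mutex_lock(&lif->queue_lock);
        /* start queues; no reset can run concurrently */
        netif_tx_wake_all_queues(lif->netdev);
        mutex_unlock(&lif->queue_lock);
    }
}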
@@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
 void ionic_rx_filter_replay(struct ionic_lif *lif)
 {
     struct ionic_rx_filter_add_cmd *ac;
+    struct hlist_head new_id_list;
     struct ionic_admin_ctx ctx;
     struct ionic_rx_filter *f;
     struct hlist_head *head;
     struct hlist_node *tmp;
+    unsigned int key;
     unsigned int i;
     int err;

+    INIT_HLIST_HEAD(&new_id_list);
     ac = &ctx.cmd.rx_filter_add;

     for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
@@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif)
                             ac->mac.addr);
                 break;
             }
+            spin_lock_bh(&lif->rx_filters.lock);
+            ionic_rx_filter_free(lif, f);
+            spin_unlock_bh(&lif->rx_filters.lock);
+
+            continue;
         }
+
+        /* remove from old id list, save new id in tmp list */
+        spin_lock_bh(&lif->rx_filters.lock);
+        hlist_del(&f->by_id);
+        spin_unlock_bh(&lif->rx_filters.lock);
+        f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
+        hlist_add_head(&f->by_id, &new_id_list);
         }
     }

+    /* rebuild the by_id hash lists with the new filter ids */
+    spin_lock_bh(&lif->rx_filters.lock);
+    hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
+        key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
+        head = &lif->rx_filters.by_id[key];
+        hlist_add_head(&f->by_id, head);
+    }
+    spin_unlock_bh(&lif->rx_filters.lock);
 }

 int ionic_rx_filters_init(struct ionic_lif *lif)
@@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif)

     spin_lock_init(&lif->rx_filters.lock);

+    spin_lock_bh(&lif->rx_filters.lock);
     for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
         INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
         INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
     }
+    spin_unlock_bh(&lif->rx_filters.lock);

     return 0;
 }
@@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif)
     struct hlist_node *tmp;
     unsigned int i;

+    spin_lock_bh(&lif->rx_filters.lock);
     for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
         head = &lif->rx_filters.by_id[i];
         hlist_for_each_entry_safe(f, tmp, head, by_id)
             ionic_rx_filter_free(lif, f);
     }
+    spin_unlock_bh(&lif->rx_filters.lock);
 }

 int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
@@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
     f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
     f->rxq_index = rxq_index;
     memcpy(&f->cmd, ac, sizeof(f->cmd));
+    netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

     INIT_HLIST_NODE(&f->by_hash);
     INIT_HLIST_NODE(&f->by_id);
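The replay path above parks surviving filters on a private hlist and then rebuilds the by_id table once the firmware has handed back new ids. A generic sketch of that re-key idiom, with hypothetical demo_* types and mask:

#include <linux/list.h>
#include <linux/spinlock.h>

#define DEMO_HLISTS 8
#define DEMO_MASK   (DEMO_HLISTS - 1)

struct demo_filter {
    unsigned int id;        /* new id handed back by firmware */
    struct hlist_node by_id;
};

/* Move every entry from the temporary list into the bucket its new id
 * hashes to; _safe iteration permits unlinking the current node.
 */
static void demo_rehash(struct hlist_head *by_id, spinlock_t *lock,
                        struct hlist_head *new_ids)
{
    struct demo_filter *f;
    struct hlist_node *tmp;

    spin_lock_bh(lock);
    hlist_for_each_entry_safe(f, tmp, new_ids, by_id) {
        hlist_del(&f->by_id);       /* unlink from the temp list first */
        hlist_add_head(&f->by_id, &by_id[f->id & DEMO_MASK]);
    }
    spin_unlock_bh(lock);
}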
@@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q,
         return;
     }

-    /* no packet processing while resetting */
-    if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
-        stats->dropped++;
-        return;
-    }
-
     stats->pkts++;
     stats->bytes += le16_to_cpu(comp->len);
@@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
     enum protocol_type proto;

     if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
-        DP_NOTICE(p_hwfn,
-                  "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+        DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                   "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
         p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
     }

@@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
         }

         /* Log and clear previous pglue_b errors if such exist */
-        qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+        qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);

         /* Enable the PF's internal FID_enable in the PXP */
         rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,

@@ -257,9 +257,10 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
 #define PGLUE_ATTENTION_ZLR_VALID   (1 << 25)
 #define PGLUE_ATTENTION_ILT_VALID   (1 << 23)

-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
-                                struct qed_ptt *p_ptt)
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                                bool hw_init)
 {
+    char msg[256];
     u32 tmp;

     tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
@@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
         details = qed_rd(p_hwfn, p_ptt,
                          PGLUE_B_REG_TX_ERR_WR_DETAILS);

-        DP_NOTICE(p_hwfn,
-                  "Illegal write by chip to [%08x:%08x] blocked.\n"
-                  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
-                  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
-                  addr_hi, addr_lo, details,
-                  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
-                  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
-                  GET_FIELD(details,
-                            PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
-                  tmp,
-                  GET_FIELD(tmp,
-                            PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
-                  GET_FIELD(tmp,
-                            PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
-                  GET_FIELD(tmp,
-                            PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+        snprintf(msg, sizeof(msg),
+                 "Illegal write by chip to [%08x:%08x] blocked.\n"
+                 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+                 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
+                 addr_hi, addr_lo, details,
+                 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+                 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+                 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
+                 tmp,
+                 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
+                 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
+                 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
+
+        if (hw_init)
+            DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+        else
+            DP_NOTICE(p_hwfn, "%s\n", msg);
     }

     tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
     }

     tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
-    if (tmp & PGLUE_ATTENTION_ICPL_VALID)
-        DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
+    if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
+        snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
+
+        if (hw_init)
+            DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+        else
+            DP_NOTICE(p_hwfn, "%s\n", msg);
+    }

     tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
     if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
@@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,

 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
 {
-    return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+    return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
 }

 static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
@@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
                index, attn_bits, attn_acks, asserted_bits,
                deasserted_bits, p_sb_attn_sw->known_attn);
     } else if (asserted_bits == 0x100) {
-        DP_INFO(p_hwfn, "MFW indication via attention\n");
+        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                   "MFW indication via attention\n");
     } else {
         DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
                    "MFW indication [deassertion]\n");

@@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,

 #define QED_MAPPING_MEMORY_SIZE(dev)    (NUM_OF_SBS(dev))

-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
-                                struct qed_ptt *p_ptt);
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                                bool hw_init);

 #endif
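The qed hunks above suppress false-positive error messages by formatting the attention report once and then choosing the log severity from the caller's context (hw_init vs. runtime interrupt). A hedged userspace sketch of that shape, with hypothetical demo_* names standing in for the DP_* macros:

#include <stdarg.h>
#include <stdio.h>
#include <stdbool.h>

static void demo_report(bool hw_init, const char *fmt, ...)
{
    char msg[256];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    if (hw_init)
        fprintf(stderr, "debug: %s\n", msg);    /* DP_VERBOSE analogue */
    else
        fprintf(stderr, "NOTICE: %s\n", msg);   /* DP_NOTICE analogue */
}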
@@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
     struct ravb_private *priv = container_of(work, struct ravb_private,
                                              work);
     struct net_device *ndev = priv->ndev;
+    int error;

     netif_tx_stop_all_queues(ndev);

@@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
         ravb_ptp_stop(ndev);

     /* Wait for DMA stopping */
-    ravb_stop_dma(ndev);
+    if (ravb_stop_dma(ndev)) {
+        /* If ravb_stop_dma() fails, the hardware is still operating
+         * for TX and/or RX. So, this should not call the following
+         * functions because ravb_dmac_init() is possible to fail too.
+         * Also, this should not retry ravb_stop_dma() again and again
+         * here because it's possible to wait forever. So, this just
+         * re-enables the TX and RX and skip the following
+         * re-initialization procedure.
+         */
+        ravb_rcv_snd_enable(ndev);
+        goto out;
+    }

     ravb_ring_free(ndev, RAVB_BE);
     ravb_ring_free(ndev, RAVB_NC);

     /* Device init */
-    ravb_dmac_init(ndev);
+    error = ravb_dmac_init(ndev);
+    if (error) {
+        /* If ravb_dmac_init() fails, descriptors are freed. So, this
+         * should return here to avoid re-enabling the TX and RX in
+         * ravb_emac_init().
+         */
+        netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+                   __func__, error);
+        return;
+    }
     ravb_emac_init(ndev);

+out:
     /* Initialise PTP Clock driver */
     if (priv->chip_id == RCAR_GEN2)
         ravb_ptp_init(ndev, priv->pdev);
@@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev)
     ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
                                   "power", 0, 0, 100);
     if (ret)
-        return ret;
+        goto out_free_netdev;

     /*
      * Optional reset GPIO configured? Minimum 100 ns reset needed
@@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev)
     ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
                                   "reset", 0, 0, 100);
     if (ret)
-        return ret;
+        goto out_free_netdev;

     /*
      * Need to wait for optional EEPROM to load, max 750 us according
@@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev)
     ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
                              priv->pinmode_mask, priv->pinmode_val);
     if (ret)
-        return ret;
+        goto out_reset_assert;

     ave_global_reset(ndev);
@@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
     port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
     port->ndev->hw_features = NETIF_F_SG |
                               NETIF_F_RXCSUM |
-                              NETIF_F_HW_CSUM;
+                              NETIF_F_HW_CSUM |
+                              NETIF_F_HW_TC;
     port->ndev->features = port->ndev->hw_features |
                            NETIF_F_HW_VLAN_CTAG_FILTER;
     port->ndev->vlan_features |= NETIF_F_SG;
@@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
                              struct netlink_ext_ack *extack)
 {
     struct geneve_dev *geneve = netdev_priv(dev);
+    enum ifla_geneve_df df = geneve->df;
     struct geneve_sock *gs4, *gs6;
     struct ip_tunnel_info info;
     bool metadata;
     bool use_udp6_rx_checksums;
-    enum ifla_geneve_df df;
     bool ttl_inherit;
     int err;
@@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev)
         rrpriv->info = NULL;
     }
     if (rrpriv->rx_ctrl) {
-        pci_free_consistent(pdev, sizeof(struct ring_ctrl),
+        pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
                             rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
         rrpriv->rx_ctrl = NULL;
     }
@@ -4,7 +4,7 @@
  *
  * Copyright 2009-2017 Analog Devices Inc.
  *
- * http://www.analog.com/ADF7242
+ * https://www.analog.com/ADF7242
  */

 #include <linux/kernel.h>
@@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi)
                                WQ_MEM_RECLAIM);
     if (unlikely(!lp->wqueue)) {
         ret = -ENOMEM;
-        goto err_hw_init;
+        goto err_alloc_wq;
     }

     ret = adf7242_hw_init(lp);
@@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi)
     return ret;

 err_hw_init:
+    destroy_workqueue(lp->wqueue);
+err_alloc_wq:
     mutex_destroy(&lp->bmux);
     ieee802154_free_hw(lp->hw);
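The smc91x, ave, adf7242 and netdevsim fixes in this pull all converge on the same goto-unwind idiom: every failure jumps to the label that releases exactly what has been acquired so far. A minimal self-contained sketch, with hypothetical demo_* names:

#include <linux/slab.h>

static int demo_probe(void)
{
    void *a, *b;
    int err;

    a = kzalloc(64, GFP_KERNEL);
    if (!a)
        return -ENOMEM;

    b = kzalloc(64, GFP_KERNEL);
    if (!b) {
        err = -ENOMEM;
        goto err_free_a;    /* only 'a' exists at this point */
    }

    err = -EIO;             /* stand-in for a failing init step */
    if (err)
        goto err_free_b;

    return 0;

err_free_b:
    kfree(b);
err_free_a:
    kfree(a);
    return err;
}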
@@ -302,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
     rtnl_lock();
     err = nsim_bpf_init(ns);
     if (err)
-        goto err_free_netdev;
+        goto err_rtnl_unlock;

     nsim_ipsec_init(ns);

@@ -316,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
 err_ipsec_teardown:
     nsim_ipsec_teardown(ns);
     nsim_bpf_uninit(ns);
+err_rtnl_unlock:
     rtnl_unlock();
 err_free_netdev:
     free_netdev(dev);
     return ERR_PTR(err);
 }
@@ -1260,6 +1260,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
         dp83640->hwts_rx_en = 1;
         dp83640->layer = PTP_CLASS_L4;
         dp83640->version = PTP_CLASS_V1;
+        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
         break;
     case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
     case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@@ -1267,6 +1268,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
         dp83640->hwts_rx_en = 1;
         dp83640->layer = PTP_CLASS_L4;
         dp83640->version = PTP_CLASS_V2;
+        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
         break;
     case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
     case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -1274,6 +1276,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
         dp83640->hwts_rx_en = 1;
         dp83640->layer = PTP_CLASS_L2;
         dp83640->version = PTP_CLASS_V2;
+        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
         break;
     case HWTSTAMP_FILTER_PTP_V2_EVENT:
     case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -1281,6 +1284,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
         dp83640->hwts_rx_en = 1;
         dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
         dp83640->version = PTP_CLASS_V2;
+        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
         break;
     default:
         return -ERANGE;
@@ -187,6 +187,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
     ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
     if (ret < ETH_ALEN) {
         netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
+        ret = -EIO;
         goto free;
     }
     memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
     unsigned long flags;

     if (old)
-        hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
-                tty->termios.c_cflag, old->c_cflag);
+        hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
+                (unsigned int)tty->termios.c_cflag,
+                (unsigned int)old->c_cflag);

     /* the actual setup */
     spin_lock_irqsave(&serial->serial_lock, flags);
@@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
 {
     unsigned char *ptr;

-    if (skb_cow(skb, 1))
+    if (skb_cow(skb, 1)) {
+        kfree_skb(skb);
         return NET_RX_DROP;
+    }

     skb_push(skb, 1);
     skb_reset_network_header(skb);
@@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
 {
     unsigned char *ptr;

-    skb_push(skb, 1);
-
-    if (skb_cow(skb, 1))
+    if (skb_cow(skb, 1)) {
+        kfree_skb(skb);
         return NET_RX_DROP;
+    }
+
+    skb_push(skb, 1);

     ptr = skb->data;
     *ptr = X25_IFACE_DATA;
@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
     netif_wake_queue(sl->dev);
 }

-/* Send one completely decapsulated IP datagram to the IP layer. */
+/* Send an LAPB frame to the LAPB module to process. */

 static void x25_asy_bump(struct x25_asy *sl)
 {
@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
     count = sl->rcount;
     dev->stats.rx_bytes += count;

-    skb = dev_alloc_skb(count+1);
+    skb = dev_alloc_skb(count);
     if (skb == NULL) {
         netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
         dev->stats.rx_dropped++;
         return;
     }
-    skb_push(skb, 1);   /* LAPB internal control */
     skb_put_data(skb, sl->rbuff, count);
     skb->protocol = x25_type_trans(skb, sl->dev);
     err = lapb_data_received(skb->dev, skb);
@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
         kfree_skb(skb);
         printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
     } else {
-        netif_rx(skb);
         dev->stats.rx_packets++;
     }
 }
@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
  */

 /*
- * Called when I frame data arrives. We did the work above - throw it
- * at the net layer.
+ * Called when I frame data arrive. We add a pseudo header for upper
+ * layers and pass it to upper layers.
  */

 static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
 {
+    if (skb_cow(skb, 1)) {
+        kfree_skb(skb);
+        return NET_RX_DROP;
+    }
+    skb_push(skb, 1);
+    skb->data[0] = X25_IFACE_DATA;
+
+    skb->protocol = x25_type_trans(skb, dev);
+
     return netif_rx(skb);
 }

@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
     switch (s) {
     case X25_END:
         if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
-            sl->rcount > 2)
+            sl->rcount >= 2)
             x25_asy_bump(sl);
         clear_bit(SLF_ESCAPE, &sl->flags);
         sl->rcount = 0;
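The lapb/x25 fixes above all enforce one rule: once a receive handler owns an skb, every exit path must either hand it on or free it — returning NET_RX_DROP without kfree_skb() leaks the buffer, and skb_push() is only safe after skb_cow() guarantees headroom. A hedged sketch of that contract (the protocol constant is illustrative):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static int demo_rx(struct net_device *dev, struct sk_buff *skb)
{
    if (skb_cow(skb, 1)) {      /* need 1 byte of headroom */
        kfree_skb(skb);         /* dropping => we must free it */
        return NET_RX_DROP;
    }

    skb_push(skb, 1);           /* safe only after skb_cow() */
    skb->data[0] = 0;           /* pseudo-header byte; X25_IFACE_DATA in the drivers */

    skb->protocol = htons(ETH_P_X25);   /* illustrative */
    return netif_rx(skb);       /* ownership passes to the stack */
}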
@@ -820,7 +820,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
     ath10k_ahb_release_irq_legacy(ar);

 err_free_pipes:
-    ath10k_pci_free_pipes(ar);
+    ath10k_pci_release_resource(ar);

 err_resource_deinit:
     ath10k_ahb_resource_deinit(ar);

@@ -3473,6 +3473,28 @@ int ath10k_pci_setup_resource(struct ath10k *ar)

     timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

+    ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+                           sizeof(pci_host_ce_config_wlan),
+                           GFP_KERNEL);
+    if (!ar_pci->attr)
+        return -ENOMEM;
+
+    ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+                                  sizeof(pci_target_ce_config_wlan),
+                                  GFP_KERNEL);
+    if (!ar_pci->pipe_config) {
+        ret = -ENOMEM;
+        goto err_free_attr;
+    }
+
+    ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+                                   sizeof(pci_target_service_to_ce_map_wlan),
+                                   GFP_KERNEL);
+    if (!ar_pci->serv_to_pipe) {
+        ret = -ENOMEM;
+        goto err_free_pipe_config;
+    }
+
     if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
         ath10k_pci_override_ce_config(ar);

@@ -3480,18 +3502,31 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
     if (ret) {
         ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
                    ret);
-        return ret;
+        goto err_free_serv_to_pipe;
     }

     return 0;

+err_free_serv_to_pipe:
+    kfree(ar_pci->serv_to_pipe);
+err_free_pipe_config:
+    kfree(ar_pci->pipe_config);
+err_free_attr:
+    kfree(ar_pci->attr);
+    return ret;
 }

 void ath10k_pci_release_resource(struct ath10k *ar)
 {
     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

     ath10k_pci_rx_retry_sync(ar);
     netif_napi_del(&ar->napi);
     ath10k_pci_ce_deinit(ar);
     ath10k_pci_free_pipes(ar);
+    kfree(ar_pci->attr);
+    kfree(ar_pci->pipe_config);
+    kfree(ar_pci->serv_to_pipe);
 }

 static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
@@ -3601,30 +3636,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,

     timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);

-    ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
-                           sizeof(pci_host_ce_config_wlan),
-                           GFP_KERNEL);
-    if (!ar_pci->attr) {
-        ret = -ENOMEM;
-        goto err_free;
-    }
-
-    ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
-                                  sizeof(pci_target_ce_config_wlan),
-                                  GFP_KERNEL);
-    if (!ar_pci->pipe_config) {
-        ret = -ENOMEM;
-        goto err_free;
-    }
-
-    ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
-                                   sizeof(pci_target_service_to_ce_map_wlan),
-                                   GFP_KERNEL);
-    if (!ar_pci->serv_to_pipe) {
-        ret = -ENOMEM;
-        goto err_free;
-    }
-
     ret = ath10k_pci_setup_resource(ar);
     if (ret) {
         ath10k_err(ar, "failed to setup resource: %d\n", ret);
@@ -3705,10 +3716,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,

 err_free_irq:
     ath10k_pci_free_irq(ar);
-    ath10k_pci_rx_retry_sync(ar);

 err_deinit_irq:
-    ath10k_pci_deinit_irq(ar);
+    ath10k_pci_release_resource(ar);

 err_sleep:
     ath10k_pci_sleep_sync(ar);
@@ -3720,29 +3730,18 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 err_core_destroy:
     ath10k_core_destroy(ar);

-err_free:
-    kfree(ar_pci->attr);
-    kfree(ar_pci->pipe_config);
-    kfree(ar_pci->serv_to_pipe);
-
     return ret;
 }

 static void ath10k_pci_remove(struct pci_dev *pdev)
 {
     struct ath10k *ar = pci_get_drvdata(pdev);
-    struct ath10k_pci *ar_pci;

     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

     if (!ar)
         return;

-    ar_pci = ath10k_pci_priv(ar);
-
-    if (!ar_pci)
-        return;
-
     ath10k_core_unregister(ar);
     ath10k_pci_free_irq(ar);
     ath10k_pci_deinit_irq(ar);
@@ -3750,9 +3749,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
     ath10k_pci_sleep_sync(ar);
     ath10k_pci_release(ar);
     ath10k_core_destroy(ar);
-    kfree(ar_pci->attr);
-    kfree(ar_pci->pipe_config);
-    kfree(ar_pci->serv_to_pipe);
 }

 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
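The ath10k restructuring above moves the kmemdup()'d config tables into one setup function with a full unwind chain, and frees them only in the matching release function, so probe/remove paths can no longer leak or double-free them. A hedged sketch of the pairing, with hypothetical demo_* names:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_res {
    void *attr, *pipe_config, *serv_to_pipe;
};

static int demo_setup(struct demo_res *r, const void *a, size_t alen,
                      const void *p, size_t plen, const void *s, size_t slen)
{
    r->attr = kmemdup(a, alen, GFP_KERNEL);
    if (!r->attr)
        return -ENOMEM;
    r->pipe_config = kmemdup(p, plen, GFP_KERNEL);
    if (!r->pipe_config)
        goto err_free_attr;
    r->serv_to_pipe = kmemdup(s, slen, GFP_KERNEL);
    if (!r->serv_to_pipe)
        goto err_free_pipe_config;
    return 0;

err_free_pipe_config:
    kfree(r->pipe_config);
err_free_attr:
    kfree(r->attr);
    return -ENOMEM;
}

/* The single place the duplicates are freed; callers never kfree() them. */
static void demo_release(struct demo_res *r)
{
    kfree(r->attr);
    kfree(r->pipe_config);
    kfree(r->serv_to_pipe);
}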
@@ -733,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
             return;
         }

+        rx_buf->skb = nskb;
+
         usb_fill_int_urb(urb, hif_dev->udev,
                          usb_rcvintpipe(hif_dev->udev,
                                         USB_REG_IN_PIPE),
                          nskb->data, MAX_REG_IN_BUF_SIZE,
-                         ath9k_hif_usb_reg_in_cb, nskb, 1);
+                         ath9k_hif_usb_reg_in_cb, rx_buf, 1);
     }

 resubmit:
@@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
 {
     struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
     u32 tp = le32_to_cpu(trig->time_point);
+    struct iwl_ucode_tlv *dup = NULL;
+    int ret;

     if (le32_to_cpu(tlv->length) < sizeof(*trig))
         return -EINVAL;
@@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
         return -EINVAL;
     }

-    if (!le32_to_cpu(trig->occurrences))
+    if (!le32_to_cpu(trig->occurrences)) {
+        dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
+                      GFP_KERNEL);
+        if (!dup)
+            return -ENOMEM;
+        trig = (void *)dup->data;
         trig->occurrences = cpu_to_le32(-1);
+        tlv = dup;
+    }

-    return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+    ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+    kfree(dup);
+
+    return ret;
 }

 static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,

@@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
     for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
         iwl_mvm_change_queue_tid(mvm, i);

+    rcu_read_unlock();
+
     if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
         ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
                                           alloc_for_sta);
-        if (ret) {
-            rcu_read_unlock();
+        if (ret)
             return ret;
-        }
     }

-    rcu_read_unlock();
-
     return free_queue;
 }

@@ -582,6 +582,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
     IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
     IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
     IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+    IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+    IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),

     IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
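The iwlwifi dbg-tlv fix above illustrates a copy-before-modify rule: the firmware image's TLVs are read-only, so to patch a field (occurrences == 0 becomes -1) the driver duplicates the TLV, edits the copy, registers it (the add path makes its own copy), and frees the scratch duplicate. A hedged sketch with hypothetical demo_* types:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_tlv {
    u32 length;
    u8 data[];
};

static int demo_patch_and_add(const struct demo_tlv *tlv,
                              int (*add)(const struct demo_tlv *))
{
    struct demo_tlv *dup;
    int ret;

    dup = kmemdup(tlv, sizeof(*tlv) + tlv->length, GFP_KERNEL);
    if (!dup)
        return -ENOMEM;

    dup->data[0] = 0xff;    /* stand-in for fixing up 'occurrences' */

    ret = add(dup);         /* callee copies what it needs */
    kfree(dup);             /* scratch duplicate always freed */
    return ret;
}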
@@ -301,6 +301,7 @@ struct mt76_hw_cap {
 #define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
 #define MT_DRV_SW_RX_AIRTIME    BIT(2)
 #define MT_DRV_RX_DMA_HDR       BIT(3)
+#define MT_DRV_HW_MGMT_TXQ      BIT(4)

 struct mt76_driver_ops {
     u32 drv_flags;

@@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
     struct mt7603_dev *dev = hw->priv;

+    mutex_lock(&dev->mt76.mutex);
     dev->coverage_class = max_t(s16, coverage_class, 0);
     mt7603_mac_set_timing(dev);
+    mutex_unlock(&dev->mt76.mutex);
 }

 static void mt7603_tx(struct ieee80211_hw *hw,

@@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
     int i;

     for (i = 0; i < 16; i++) {
-        int j, acs = i / 4, index = i % 4;
+        int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
+        int acs = i / MT7615_MAX_WMM_SETS;
         u32 ctrl, val, qlen = 0;

-        val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+        val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
         ctrl = BIT(31) | BIT(15) | (acs << 8);

         for (j = 0; j < 32; j++) {
@@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
                 continue;

             mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
-                    ctrl | (j + (index << 5)));
+                    ctrl | (j + (wmm_idx << 5)));
             qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
                                    GENMASK(11, 0));
         }
-        seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+        seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
     }

     return 0;

@@ -36,10 +36,10 @@ static int
 mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
 {
     static const u8 wmm_queue_map[] = {
-        MT7622_TXQ_AC0,
-        MT7622_TXQ_AC1,
-        MT7622_TXQ_AC2,
-        MT7622_TXQ_AC3,
+        [IEEE80211_AC_BK] = MT7622_TXQ_AC0,
+        [IEEE80211_AC_BE] = MT7622_TXQ_AC1,
+        [IEEE80211_AC_VI] = MT7622_TXQ_AC2,
+        [IEEE80211_AC_VO] = MT7622_TXQ_AC3,
     };
     int ret;
     int i;
@@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev)
     int i;

     mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+    mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
     if (is_mt7615(&dev->mt76)) {
         mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
     } else {

@@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
 {
     int ret;

-    ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE +
-                           MT7615_EEPROM_EXTRA_DATA);
+    ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE);
     if (ret < 0)
         return ret;

@@ -17,7 +17,7 @@
 #define MT7615_EEPROM_TXDPD_SIZE    216
 #define MT7615_EEPROM_TXDPD_COUNT   (44 + 3)

-#define MT7615_EEPROM_EXTRA_DATA    (MT7615_EEPROM_TXDPD_OFFSET + \
+#define MT7615_EEPROM_FULL_SIZE     (MT7615_EEPROM_TXDPD_OFFSET + \
                                      MT7615_EEPROM_TXDPD_COUNT * \
                                      MT7615_EEPROM_TXDPD_SIZE)

@@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
     fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
     fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

-    if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
-        q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
-            skb_get_queue_mapping(skb);
-        p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
-    } else if (beacon) {
-        if (ext_phy)
-            q_idx = MT_LMAC_BCN1;
-        else
-            q_idx = MT_LMAC_BCN0;
+    if (beacon) {
         p_fmt = MT_TX_TYPE_FW;
-    } else {
-        if (ext_phy)
-            q_idx = MT_LMAC_ALTX1;
-        else
-            q_idx = MT_LMAC_ALTX0;
+        q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+    } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
         p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+        q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
+    } else {
+        p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+        q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
+            mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
     }

     val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |

@@ -124,21 +124,6 @@ enum tx_pkt_type {
     MT_TX_TYPE_FW,
 };

-enum tx_pkt_queue_idx {
-    MT_LMAC_AC00,
-    MT_LMAC_AC01,
-    MT_LMAC_AC02,
-    MT_LMAC_AC03,
-    MT_LMAC_ALTX0 = 0x10,
-    MT_LMAC_BMC0,
-    MT_LMAC_BCN0,
-    MT_LMAC_PSMP0,
-    MT_LMAC_ALTX1,
-    MT_LMAC_BMC1,
-    MT_LMAC_BCN1,
-    MT_LMAC_PSMP1,
-};
-
 enum tx_port_idx {
     MT_TX_PORT_IDX_LMAC,
     MT_TX_PORT_IDX_MCU

@@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
     struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
     struct mt7615_dev *dev = mt7615_hw_dev(hw);

+    queue = mt7615_lmac_mapping(dev, queue);
     queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;

     return mt7615_mcu_set_wmm(dev, queue, params);
@@ -735,9 +736,12 @@ static void
 mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
     struct mt7615_phy *phy = mt7615_hw_phy(hw);
+    struct mt7615_dev *dev = phy->dev;

+    mutex_lock(&dev->mt76.mutex);
     phy->coverage_class = max_t(s16, coverage_class, 0);
     mt7615_mac_set_timing(phy);
+    mutex_unlock(&dev->mt76.mutex);
 }

 static int

@@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
     static const struct mt76_driver_ops drv_ops = {
         /* txwi_size = txd size + txp size */
         .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
-        .drv_flags = MT_DRV_TXWI_NO_FREE,
+        .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
         .survey_flags = SURVEY_INFO_TIME_TX |
                         SURVEY_INFO_TIME_RX |
                         SURVEY_INFO_TIME_BSS_RX,
|
||||
struct list_head wd_head;
|
||||
};
|
||||
|
||||
enum tx_pkt_queue_idx {
|
||||
MT_LMAC_AC00,
|
||||
MT_LMAC_AC01,
|
||||
MT_LMAC_AC02,
|
||||
MT_LMAC_AC03,
|
||||
MT_LMAC_ALTX0 = 0x10,
|
||||
MT_LMAC_BMC0,
|
||||
MT_LMAC_BCN0,
|
||||
MT_LMAC_PSMP0,
|
||||
MT_LMAC_ALTX1,
|
||||
MT_LMAC_BMC1,
|
||||
MT_LMAC_BCN1,
|
||||
MT_LMAC_PSMP1,
|
||||
};
|
||||
|
||||
enum {
|
||||
HW_BSSID_0 = 0x0,
|
||||
HW_BSSID_1,
|
||||
@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
|
||||
return MT7615_WTBL_SIZE;
|
||||
}
|
||||
|
||||
static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
|
||||
{
|
||||
static const u8 lmac_queue_map[] = {
|
||||
[IEEE80211_AC_BK] = MT_LMAC_AC00,
|
||||
[IEEE80211_AC_BE] = MT_LMAC_AC01,
|
||||
[IEEE80211_AC_VI] = MT_LMAC_AC02,
|
||||
[IEEE80211_AC_VO] = MT_LMAC_AC03,
|
||||
};
|
||||
|
||||
if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
|
||||
return MT_LMAC_AC01; /* BE */
|
||||
|
||||
return lmac_queue_map[ac];
|
||||
}
|
||||
|
||||
void mt7615_dma_reset(struct mt7615_dev *dev);
|
||||
void mt7615_scan_work(struct work_struct *work);
|
||||
void mt7615_roc_work(struct work_struct *work);
|
||||
|
@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
|
||||
{
|
||||
static const struct mt76_driver_ops drv_ops = {
|
||||
.txwi_size = MT_USB_TXD_SIZE,
|
||||
.drv_flags = MT_DRV_RX_DMA_HDR,
|
||||
.drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
|
||||
.tx_prepare_skb = mt7663u_tx_prepare_skb,
|
||||
.tx_complete_skb = mt7663u_tx_complete_skb,
|
||||
.tx_status_data = mt7663u_tx_status_data,
|
||||
@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
|
||||
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
|
||||
FW_STATE_PWR_ON << 1, 500)) {
|
||||
dev_err(dev->mt76.dev, "Timeout for power on\n");
|
||||
return -EIO;
|
||||
ret = -EIO;
|
||||
goto error;
|
||||
}
|
||||
|
||||
alloc_queues:
|
||||
ret = mt76u_alloc_mcu_queue(&dev->mt76);
|
||||
if (ret)
|
||||
goto error;
|
||||
goto error_free_q;
|
||||
|
||||
ret = mt76u_alloc_queues(&dev->mt76);
|
||||
if (ret)
|
||||
goto error;
|
||||
goto error_free_q;
|
||||
|
||||
ret = mt7663u_register_device(dev);
|
||||
if (ret)
|
||||
goto error_freeq;
|
||||
goto error_free_q;
|
||||
|
||||
return 0;
|
||||
|
||||
error_freeq:
|
||||
error_free_q:
|
||||
mt76u_queues_deinit(&dev->mt76);
|
||||
error:
|
||||
mt76u_deinit(&dev->mt76);
|
||||
|
@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
tasklet_disable(&dev->mt76.tx_tasklet);
|
||||
napi_disable(&dev->mt76.tx_napi);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
|
||||
mt76_for_each_q_rx(&dev->mt76, i) {
|
||||
napi_disable(&dev->mt76.napi[i]);
|
||||
}
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
|
||||
mt76_for_each_q_rx(&dev->mt76, i) {
|
||||
napi_enable(&dev->mt76.napi[i]);
|
||||
napi_schedule(&dev->mt76.napi[i]);
|
||||
}
|
||||
|
@ -716,9 +716,12 @@ static void
|
||||
mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
|
||||
{
|
||||
struct mt7915_phy *phy = mt7915_hw_phy(hw);
|
||||
struct mt7915_dev *dev = phy->dev;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
phy->coverage_class = max_t(s16, coverage_class, 0);
|
||||
mt7915_mac_set_timing(phy);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
|
||||
skb_set_queue_mapping(skb, qid);
|
||||
}
|
||||
|
||||
if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
|
||||
!ieee80211_is_data(hdr->frame_control) &&
|
||||
!ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
|
||||
qid = MT_TXQ_PSD;
|
||||
skb_set_queue_mapping(skb, qid);
|
||||
}
|
||||
|
||||
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
|
||||
ieee80211_get_tx_rates(info->control.vif, sta, skb,
|
||||
info->control.rates, 1);
|
||||
|
@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
|
||||
{
|
||||
if (mt76_chip(dev) == 0x7663) {
|
||||
static const u8 wmm_queue_map[] = {
|
||||
[IEEE80211_AC_VO] = 0,
|
||||
[IEEE80211_AC_VI] = 1,
|
||||
[IEEE80211_AC_BE] = 2,
|
||||
[IEEE80211_AC_BK] = 4,
|
||||
static const u8 lmac_queue_map[] = {
|
||||
/* ac to lmac mapping */
|
||||
[IEEE80211_AC_BK] = 0,
|
||||
[IEEE80211_AC_BE] = 1,
|
||||
[IEEE80211_AC_VI] = 2,
|
||||
[IEEE80211_AC_VO] = 4,
|
||||
};
|
||||
|
||||
if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map)))
|
||||
return 2; /* BE */
|
||||
if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
|
||||
return 1; /* BE */
|
||||
|
||||
return wmm_queue_map[ac];
|
||||
return lmac_queue_map[ac];
|
||||
}
|
||||
|
||||
return mt76_ac_to_hwq(ac);
|
||||
@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
|
||||
|
||||
static void mt76u_free_tx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q;
|
||||
int i, j;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
struct mt76_queue *q;
|
||||
int j;
|
||||
|
||||
q = dev->q_tx[i].q;
|
||||
if (!q)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < q->ndesc; j++)
|
||||
usb_free_urb(q->entry[j].urb);
|
||||
}
|
||||
@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev)
|
||||
|
||||
void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue_entry entry;
|
||||
struct mt76_queue *q;
|
||||
int i, j, ret;
|
||||
int ret;
|
||||
|
||||
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
|
||||
HZ / 5);
|
||||
if (!ret) {
|
||||
struct mt76_queue_entry entry;
|
||||
struct mt76_queue *q;
|
||||
int i, j;
|
||||
|
||||
dev_err(dev->dev, "timed out waiting for pending tx\n");
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = dev->q_tx[i].q;
|
||||
if (!q)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < q->ndesc; j++)
|
||||
usb_kill_urb(q->entry[j].urb);
|
||||
}
|
||||
@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
*/
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = dev->q_tx[i].q;
|
||||
if (!q)
|
||||
continue;
|
||||
|
||||
/* Assure we are in sync with killed tasklet. */
|
||||
spin_lock_bh(&q->lock);
|
||||
|
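Several of the mt76 hunks above replace positional queue arrays with const lookup tables indexed by the 802.11 access category, which makes the AC-to-hardware-queue mapping explicit and guards out-of-range input. A hedged, runnable sketch of the idiom (enum values illustrative; hardware numbering in the real driver is not dense either):

#include <stdint.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

static uint8_t demo_ac_to_hwq(unsigned int ac)
{
    static const uint8_t lmac_queue_map[AC_NUM] = {
        [AC_BK] = 0,
        [AC_BE] = 1,
        [AC_VI] = 2,
        [AC_VO] = 4,    /* hardware queue ids need not be contiguous */
    };

    if (ac >= AC_NUM)   /* WARN_ON() analogue: fall back to best effort */
        return lmac_queue_map[AC_BE];

    return lmac_queue_map[ac];
}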
@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
                  "Maximum number of queues per virtual interface");

+#define XENNET_TIMEOUT  (5 * HZ)
+
 static const struct ethtool_ops xennet_ethtool_ops;

 struct netfront_cb {
@@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)

     netif_carrier_off(netdev);

-    xenbus_switch_state(dev, XenbusStateInitialising);
-    wait_event(module_wq,
-               xenbus_read_driver_state(dev->otherend) !=
-               XenbusStateClosed &&
-               xenbus_read_driver_state(dev->otherend) !=
-               XenbusStateUnknown);
+    do {
+        xenbus_switch_state(dev, XenbusStateInitialising);
+        err = wait_event_timeout(module_wq,
+                                 xenbus_read_driver_state(dev->otherend) !=
+                                 XenbusStateClosed &&
+                                 xenbus_read_driver_state(dev->otherend) !=
+                                 XenbusStateUnknown, XENNET_TIMEOUT);
+    } while (!err);
+
     return netdev;

 exit:
@@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
 };
 #endif /* CONFIG_SYSFS */

+static void xennet_bus_close(struct xenbus_device *dev)
+{
+    int ret;
+
+    if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+        return;
+    do {
+        xenbus_switch_state(dev, XenbusStateClosing);
+        ret = wait_event_timeout(module_wq,
+                                 xenbus_read_driver_state(dev->otherend) ==
+                                 XenbusStateClosing ||
+                                 xenbus_read_driver_state(dev->otherend) ==
+                                 XenbusStateClosed ||
+                                 xenbus_read_driver_state(dev->otherend) ==
+                                 XenbusStateUnknown,
+                                 XENNET_TIMEOUT);
+    } while (!ret);
+
+    if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+        return;
+
+    do {
+        xenbus_switch_state(dev, XenbusStateClosed);
+        ret = wait_event_timeout(module_wq,
+                                 xenbus_read_driver_state(dev->otherend) ==
+                                 XenbusStateClosed ||
+                                 xenbus_read_driver_state(dev->otherend) ==
+                                 XenbusStateUnknown,
+                                 XENNET_TIMEOUT);
+    } while (!ret);
+}
+
 static int xennet_remove(struct xenbus_device *dev)
 {
     struct netfront_info *info = dev_get_drvdata(&dev->dev);

     dev_dbg(&dev->dev, "%s\n", dev->nodename);

-    if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
-        xenbus_switch_state(dev, XenbusStateClosing);
-        wait_event(module_wq,
-                   xenbus_read_driver_state(dev->otherend) ==
-                   XenbusStateClosing ||
-                   xenbus_read_driver_state(dev->otherend) ==
-                   XenbusStateUnknown);
-
-        xenbus_switch_state(dev, XenbusStateClosed);
-        wait_event(module_wq,
-                   xenbus_read_driver_state(dev->otherend) ==
-                   XenbusStateClosed ||
-                   xenbus_read_driver_state(dev->otherend) ==
-                   XenbusStateUnknown);
-    }
-
+    xennet_bus_close(dev);
     xennet_disconnect_backend(info);

     if (info->netdev->reg_state == NETREG_REGISTERED)
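The xen-netfront deadlock fix above swaps an unbounded wait_event() on the peer's xenbus state for a bounded wait_event_timeout() in a loop that re-asserts the local state each pass, so a slow or restarted backend cannot hang remove forever. A hedged sketch of the shape, with hypothetical demo_* names:

#include <linux/wait.h>
#include <linux/jiffies.h>

#define DEMO_TIMEOUT (5 * HZ)

static void demo_wait_for_peer(wait_queue_head_t *wq, bool (*done)(void *),
                               void (*kick)(void *), void *ctx)
{
    long ret;

    do {
        kick(ctx);  /* e.g. re-issue xenbus_switch_state(...) */
        ret = wait_event_timeout(*wq, done(ctx), DEMO_TIMEOUT);
    } while (!ret); /* 0 == timed out with the condition still false */
}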
@@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
     case S3FWRN5_MODE_FW:
         return s3fwrn5_fw_recv_frame(ndev, skb);
     default:
+        kfree_skb(skb);
         return -ENODEV;
     }
 }
@@ -33,7 +33,7 @@
  * of two or more hash tables when the rhashtable is being resized.
  * The end of the chain is marked with a special nulls marks which has
  * the least significant bit set but otherwise stores the address of
- * the hash bucket.  This allows us to be be sure we've found the end
+ * the hash bucket.  This allows us to be sure we've found the end
  * of the right list.
  * The value stored in the hash bucket has BIT(0) used as a lock bit.
  * This bit must be atomically set before any changes are made to
@@ -220,7 +220,9 @@ struct tcp_sock {
     } rack;
     u16 advmss;     /* Advertised MSS */
     u8  compressed_ack;
-    u8  dup_ack_counter;
+    u8  dup_ack_counter:2,
+        tlp_retrans:1,  /* TLP is a retransmission */
+        unused:5;
     u32 chrono_start;   /* Start time in jiffies of a TCP chrono */
     u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
     u8  chrono_type:2,  /* current chronograph type */
@@ -243,7 +245,7 @@ struct tcp_sock {
         save_syn:1,     /* Save headers of SYN packet */
         is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
         syn_smc:1;      /* SYN includes SMC */
-    u32 tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
+    u32 tlp_high_seq;   /* snd_nxt at the time of TLP */

     u32 tcp_tx_delay;   /* delay (in usec) added to TX packets */
    u64 tcp_wstamp_ns;  /* departure time for next sent data packet */
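The tcp_sock change above (part of "tcp: allow at most one TLP probe per flight") carves the new tlp_retrans flag out of an existing u8 with bitfields instead of widening the struct. A hedged, runnable illustration; the struct is a stand-in, only the field names mirror the patch:

#include <stdint.h>
#include <stdio.h>

struct demo_flags {
    uint8_t dup_ack_counter:2;  /* 0..3 is all the counter needs */
    uint8_t tlp_retrans:1;      /* TLP probe was a retransmission */
    uint8_t unused:5;           /* room for future flags */
};

int main(void)
{
    struct demo_flags f = { .dup_ack_counter = 3, .tlp_retrans = 1 };

    /* still one byte: the new flag costs no extra tcp_sock space */
    printf("size=%zu counter=%u tlp_retrans=%u\n",
           sizeof(f), f.dup_ack_counter, f.tlp_retrans);
    return 0;
}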
@@ -5,7 +5,6 @@
 #include <linux/list.h>
 #include <linux/netlink.h>
 #include <net/flow_dissector.h>
-#include <linux/rhashtable.h>

 struct flow_match {
     struct flow_dissector *dissector;
@@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
     if (addr_len > sizeof(struct sockaddr_ax25) &&
         fsa->fsa_ax25.sax25_ndigis != 0) {
         /* Valid number of digipeaters ? */
-        if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
+        if (fsa->fsa_ax25.sax25_ndigis < 1 ||
+            fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
+            addr_len < sizeof(struct sockaddr_ax25) +
+            sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
             err = -EINVAL;
             goto out_release;
         }
@@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
         struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;

         /* Valid number of digipeaters ? */
-        if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
+        if (usax->sax25_ndigis < 1 ||
+            usax->sax25_ndigis > AX25_MAX_DIGIS ||
+            addr_len < sizeof(struct sockaddr_ax25) +
+            sizeof(ax25_address) * usax->sax25_ndigis) {
             err = -EINVAL;
             goto out;
         }
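The AX.25 out-of-bounds fixes above validate the count field inside a variable-length sockaddr against the buffer length the caller actually supplied, before the trailing array is read. A hedged sketch of that check with simplified stand-in types:

#include <stddef.h>
#include <stdbool.h>

#define DEMO_MAX_DIGIS 8

struct demo_addr { char call[7]; };

struct demo_sockaddr {
    unsigned char ndigis;                   /* how many entries follow */
    struct demo_addr digipeaters[DEMO_MAX_DIGIS];
};

static bool demo_addr_ok(const struct demo_sockaddr *sa, size_t addr_len)
{
    if (sa->ndigis < 1 || sa->ndigis > DEMO_MAX_DIGIS)
        return false;

    /* reject a short buffer whose ndigis promises more than it holds */
    return addr_len >= offsetof(struct demo_sockaddr, digipeaters) +
                       sizeof(struct demo_addr) * sa->ndigis;
}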
@@ -5601,7 +5601,7 @@ static void flush_backlog(struct work_struct *work)
     skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
         if (skb->dev->reg_state == NETREG_UNREGISTERING) {
             __skb_unlink(skb, &sd->input_pkt_queue);
-            kfree_skb(skb);
+            dev_kfree_skb_irq(skb);
             input_queue_head_incr(sd);
         }
     }
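flush_backlog() walks input_pkt_queue with hardware interrupts disabled, where a direct kfree_skb() is not safe; the fix defers the actual free to the NET_TX completion softirq. A hedged sketch of the rule:

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* When a buffer must be released from a section running with IRQs
 * disabled, queue it for deferred freeing rather than freeing inline:
 * dev_kfree_skb_irq() chains the skb on the per-cpu completion queue
 * and raises NET_TX_SOFTIRQ, which performs the real free later.
 */
static void demo_drop_in_irq_context(struct sk_buff *skb)
{
    dev_kfree_skb_irq(skb);
}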
@@ -4,6 +4,7 @@
 #include <net/flow_offload.h>
 #include <linux/rtnetlink.h>
 #include <linux/mutex.h>
+#include <linux/rhashtable.h>

 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
@@ -1108,7 +1108,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
     trans_timeout = queue->trans_timeout;
     spin_unlock_irq(&queue->_xmit_lock);

-    return sprintf(buf, "%lu", trans_timeout);
+    return sprintf(buf, fmt_ulong, trans_timeout);
 }

 static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
@@ -3343,7 +3343,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
          */
         if (err < 0) {
             /* If device is not registered at all, free it now */
-            if (dev->reg_state == NETREG_UNINITIALIZED)
+            if (dev->reg_state == NETREG_UNINITIALIZED ||
+                dev->reg_state == NETREG_UNREGISTERED)
                 free_netdev(dev);
             goto out;
         }
@@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
     more_reuse->prog = reuse->prog;
     more_reuse->reuseport_id = reuse->reuseport_id;
     more_reuse->bind_inany = reuse->bind_inany;
+    more_reuse->has_conns = reuse->has_conns;

     memcpy(more_reuse->socks, reuse->socks,
            reuse->num_socks * sizeof(struct sock *));
@@ -120,13 +120,18 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
     return skb_clone(frame->skb_std, GFP_ATOMIC);
 }

-static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
-                         struct hsr_port *port, u8 proto_version)
+static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
+                                    struct hsr_frame_info *frame,
+                                    struct hsr_port *port, u8 proto_version)
 {
     struct hsr_ethhdr *hsr_ethhdr;
     int lane_id;
     int lsdu_size;

+    /* pad to minimum packet size which is 60 + 6 (HSR tag) */
+    if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+        return NULL;
+
     if (port->type == HSR_PT_SLAVE_A)
         lane_id = 0;
     else
@@ -144,6 +149,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
     hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
     hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
                                        ETH_P_HSR : ETH_P_PRP);

+    return skb;
 }

 static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
@@ -172,9 +179,10 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
     memmove(dst, src, movelen);
     skb_reset_mac_header(skb);

-    hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
-
-    return skb;
+    /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
+     * that case
+     */
+    return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
 }

 /* If the original frame was an HSR tagged frame, just clone it to be sent
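The hsr_fill_tag() change above hinges on skb_put_padto()'s ownership contract: on failure it frees the skb itself, so the helper now reports success by returning the skb (possibly grown in place) and failure by returning NULL, and the caller must not touch its old pointer after an error. A hedged sketch of that contract:

#include <linux/skbuff.h>

static struct sk_buff *demo_pad_frame(struct sk_buff *skb,
                                      unsigned int min_len)
{
    if (skb_put_padto(skb, min_len))
        return NULL;    /* skb already freed by skb_put_padto() */

    return skb;         /* safe to keep using */
}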
@@ -325,7 +325,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
     if (port->type != node_dst->addr_B_port)
         return;

-    ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
+    if (is_valid_ether_addr(node_dst->macaddress_B))
+        ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
 }

 void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
Some files were not shown because too many files have changed in this diff.