mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 09:12:07 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Fix deadlock in bpf_send_signal() from Yonghong Song.
 2) Fix off by one in kTLS offload of mlx5, from Tariq Toukan.
 3) Add missing locking in iwlwifi mvm code, from Avraham Stern.
 4) Fix MSG_WAITALL handling in rxrpc, from David Howells.
 5) Need to hold RTNL mutex in tcindex_partial_destroy_work(), from Cong Wang.
 6) Fix producer race condition in AF_PACKET, from Willem de Bruijn.
 7) cls_route removes the wrong filter during change operations, from Cong Wang.
 8) Reject unrecognized request flags in ethtool netlink code, from Michal Kubecek.
 9) Need to keep MAC in reset until PHY is up in bcmgenet driver, from Doug Berger.
10) Don't leak ct zone template in act_ct during replace, from Paul Blakey.
11) Fix flushing of offloaded netfilter flowtable flows, also from Paul Blakey.
12) Fix throughput drop during tx backpressure in cxgb4, from Rahul Lakkireddy.
13) Don't let a non-NULL skb->dev leave the TCP stack, from Eric Dumazet.
14) TCP_QUEUE_SEQ socket option has to update tp->copied_seq as well, also from Eric Dumazet.
15) Restrict macsec to ethernet devices, from Willem de Bruijn.
16) Fix reference leak in some ethtool *_SET handlers, from Michal Kubecek.
17) Fix accidental disabling of MSI for some r8169 chips, from Heiner Kallweit.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (138 commits)
  net: Fix CONFIG_NET_CLS_ACT=n and CONFIG_NFT_FWD_NETDEV={y, m} build
  net: ena: Add PCI shutdown handler to allow safe kexec
  selftests/net/forwarding: define libs as TEST_PROGS_EXTENDED
  selftests/net: add missing tests to Makefile
  r8169: re-enable MSI on RTL8168c
  net: phy: mdio-bcm-unimac: Fix clock handling
  cxgb4/ptp: pass the sign of offset delta in FW CMD
  net: dsa: tag_8021q: replace dsa_8021q_remove_header with __skb_vlan_pop
  net: cbs: Fix software cbs to consider packet sending time
  net/mlx5e: Do not recover from a non-fatal syndrome
  net/mlx5e: Fix ICOSQ recovery flow with Striding RQ
  net/mlx5e: Fix missing reset of SW metadata in Striding RQ reset
  net/mlx5e: Enhance ICOSQ WQE info fields
  net/mlx5_core: Set IB capability mask1 to fix ib_srpt connection failure
  selftests: netfilter: add nfqueue test case
  netfilter: nft_fwd_netdev: allow to redirect to ifb via ingress
  netfilter: nft_fwd_netdev: validate family and chain type
  netfilter: nft_set_rbtree: Detect partial overlaps on insertion
  netfilter: nft_set_rbtree: Introduce and use nft_rbtree_interval_start()
  netfilter: nft_set_pipapo: Separate partial and complete overlap cases on insertion
  ...
This commit is contained in:
commit 1b649e0bca
@ -225,6 +225,7 @@ Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajesh Shah <rajesh.shah@intel.com>
@ -8688,7 +8688,7 @@ M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Luca Coelho <luciano.coelho@intel.com>
M: Intel Linux Wireless <linuxwifi@intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/intel/iwlwifi/
@ -119,12 +119,12 @@ ethernet@e2000 {

ethernet@e4000 {
phy-handle = <&rgmii_phy1>;
phy-connection-type = "rgmii-txid";
phy-connection-type = "rgmii-id";
};

ethernet@e6000 {
phy-handle = <&rgmii_phy2>;
phy-connection-type = "rgmii-txid";
phy-connection-type = "rgmii-id";
};

ethernet@e8000 {
@ -131,12 +131,12 @@ &usb1 {
&fman0 {
ethernet@e4000 {
phy-handle = <&rgmii_phy1>;
phy-connection-type = "rgmii";
phy-connection-type = "rgmii-id";
};

ethernet@e6000 {
phy-handle = <&rgmii_phy2>;
phy-connection-type = "rgmii";
phy-connection-type = "rgmii-id";
};

ethernet@e8000 {
@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
if (is_jmp64) {
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
}
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JSET | BPF_K:
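The hunk above fixes BPF_JSET in the 32-bit x86 JIT: the jump is taken when (dst & src) != 0, and a 64-bit operand lives in a lo/hi register pair, so the high-half AND and the OR that folds it into the low half must be emitted only for 64-bit jumps. A minimal C sketch of the intended semantics, not the JIT encoding (names illustrative):

#include <stdio.h>
#include <stdint.h>

/* BPF_JSET on a 32-bit host holding a 64-bit value as a lo/hi pair:
 * the branch is taken if (dst & src) != 0 over the relevant width. */
static int jset_taken(uint32_t dst_lo, uint32_t dst_hi,
		      uint32_t src_lo, uint32_t src_hi, int is_jmp64)
{
	uint32_t r = dst_lo & src_lo;	/* and dreg_lo,sreg_lo */

	if (is_jmp64)
		r |= dst_hi & src_hi;	/* and dreg_hi,sreg_hi; or dreg_lo,dreg_hi */
	return r != 0;
}

int main(void)
{
	/* only the high halves intersect: JSET is taken for jmp64, not jmp32 */
	printf("jmp64: %d\n", jset_taken(0x0, 0xff, 0x0, 0xf0, 1)); /* 1 */
	printf("jmp32: %d\n", jset_taken(0x0, 0xff, 0x0, 0xf0, 0)); /* 0 */
	return 0;
}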
@ -149,6 +149,7 @@ config NET_FC
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
select NET_REDIRECT
---help---
This is an intermediate driver that allows sharing of
resources.
@ -141,29 +141,29 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
return 0;

/* Print out debug information. */
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"CAIF SPI debug information:\n");
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"CAIF SPI debug information:\n");

len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);

len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"STATE: %d\n", cfspi->dbg_state);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous CMD: 0x%x\n", cfspi->pcmd);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current CMD: 0x%x\n", cfspi->cmd);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous TX len: %d\n", cfspi->tx_ppck_len);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous RX len: %d\n", cfspi->rx_ppck_len);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current TX len: %d\n", cfspi->tx_cpck_len);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current RX len: %d\n", cfspi->rx_cpck_len);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Next TX len: %d\n", cfspi->tx_npck_len);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Next RX len: %d\n", cfspi->rx_npck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"STATE: %d\n", cfspi->dbg_state);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous CMD: 0x%x\n", cfspi->pcmd);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current CMD: 0x%x\n", cfspi->cmd);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous TX len: %d\n", cfspi->tx_ppck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Previous RX len: %d\n", cfspi->rx_ppck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current TX len: %d\n", cfspi->tx_cpck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current RX len: %d\n", cfspi->rx_cpck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Next TX len: %d\n", cfspi->tx_npck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Next RX len: %d\n", cfspi->rx_npck_len);

if (len > DEBUGFS_BUF_SIZE)
len = DEBUGFS_BUF_SIZE;
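The snprintf-to-scnprintf conversions in this file matter because snprintf returns the length the output would have had, not the number of bytes actually stored; accumulating that return value can push the len offset past the end of the buffer, after which buf + len points out of bounds and DEBUGFS_BUF_SIZE - len underflows. scnprintf returns what was really written. A small userspace sketch of the failure mode, assuming a hypothetical 8-byte buffer:

#include <stdio.h>

int main(void)
{
	char buf[8];
	int len = 0;

	/* snprintf truncates to "hello w" but returns 11, the would-be length */
	len += snprintf(buf + len, sizeof(buf) - len, "hello world");

	/* len is now 11 > sizeof(buf): a further buf + len write is out of
	 * bounds, and sizeof(buf) - len wraps to a huge unsigned size */
	printf("len = %d, buf = \"%s\"\n", len, buf);
	return 0;
}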
@ -180,23 +180,23 @@ static ssize_t print_frame(char *buf, size_t size, char *frm,
int len = 0;
int i;
for (i = 0; i < count; i++) {
len += snprintf((buf + len), (size - len),
len += scnprintf((buf + len), (size - len),
"[0x" BYTE_HEX_FMT "]",
frm[i]);
if ((i == cut) && (count > (cut * 2))) {
/* Fast forward. */
i = count - cut;
len += snprintf((buf + len), (size - len),
"--- %zu bytes skipped ---\n",
count - (cut * 2));
len += scnprintf((buf + len), (size - len),
"--- %zu bytes skipped ---\n",
count - (cut * 2));
}

if ((!(i % 10)) && i) {
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"\n");
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"\n");
}
}
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
return len;
}

@ -214,18 +214,18 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
return 0;

/* Print out debug information. */
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current frame:\n");
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Current frame:\n");

len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);

len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
cfspi->xfer.va_tx[0],
(cfspi->tx_cpck_len + SPI_CMD_SZ), 100);

len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Rx data (Len: %d):\n", cfspi->rx_cpck_len);
len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
"Rx data (Len: %d):\n", cfspi->rx_cpck_len);

len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
cfspi->xfer.va_rx,
@ -625,7 +625,10 @@ static int slcan_open(struct tty_struct *tty)
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
slc_free_netdev(sl->dev);
/* do not call free_netdev before rtnl_unlock */
rtnl_unlock();
free_netdev(sl->dev);
return err;

err_exit:
rtnl_unlock();
@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
static void
mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
{
u32 mask = PMCR_TX_EN | PMCR_RX_EN;
u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;

if (enable)
mt7530_set(priv, MT7530_PMCR_P(port), mask);
@ -1444,7 +1444,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
PMCR_BACKPR_EN | PMCR_FORCE_MODE;

/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
@ -1018,13 +1018,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
struct ena_rx_buffer *rx_info;

req_id = rx_ring->free_ids[next_to_use];
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
break;

rx_info = &rx_ring->rx_buffer_info[req_id];

rc = ena_alloc_rx_page(rx_ring, rx_info,
GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
@ -1379,9 +1375,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info;
u16 len, req_id, buf = 0;
void *va;
int rc;

len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;

rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
return NULL;

rx_info = &rx_ring->rx_buffer_info[req_id];

if (unlikely(!rx_info->page)) {
@ -1454,6 +1456,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
buf++;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;

rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
return NULL;

rx_info = &rx_ring->rx_buffer_info[req_id];
} while (1);

@ -1968,7 +1975,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
}

/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);

@ -2068,6 +2075,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)

static int ena_request_io_irq(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
unsigned long flags = 0;
struct ena_irq *irq;
int rc = 0, i, k;
@ -2078,7 +2086,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
return -EINVAL;
}

for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
irq->data);
@ -2119,6 +2127,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter)

static void ena_free_io_irq(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
struct ena_irq *irq;
int i;

@ -2129,7 +2138,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
}
#endif /* CONFIG_RFS_ACCEL */

for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
free_irq(irq->vector, irq->data);
@ -2144,12 +2153,13 @@ static void ena_disable_msix(struct ena_adapter *adapter)

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
int i;

if (!netif_running(adapter->netdev))
return;

for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
synchronize_irq(adapter->irq_tbl[i].vector);
}

@ -3476,6 +3486,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);

mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies;
dev_err(&pdev->dev,
"Device reset completed successfully, Driver info: %s\n",
version);
@ -4325,13 +4336,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

/*****************************************************************************/

/* ena_remove - Device Removal Routine
/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
* @pdev: PCI device information struct
* @shutdown: Is it a shutdown operation? If false, means it is a removal
*
* ena_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
* __ena_shutoff is a helper routine that does the real work on shutdown and
* removal paths; the difference between those paths is with regards to whether
* dettach or unregister the netdevice.
*/
static void ena_remove(struct pci_dev *pdev)
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
struct ena_adapter *adapter = pci_get_drvdata(pdev);
struct ena_com_dev *ena_dev;
@ -4350,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev)

cancel_work_sync(&adapter->reset_task);

rtnl_lock();
rtnl_lock(); /* lock released inside the below if-else block */
ena_destroy_device(adapter, true);
rtnl_unlock();

unregister_netdev(netdev);

free_netdev(netdev);
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
rtnl_unlock();
} else {
rtnl_unlock();
unregister_netdev(netdev);
free_netdev(netdev);
}

ena_com_rss_destroy(ena_dev);

@ -4371,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev)
vfree(ena_dev);
}

/* ena_remove - Device Removal Routine
* @pdev: PCI device information struct
*
* ena_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/

static void ena_remove(struct pci_dev *pdev)
{
__ena_shutoff(pdev, false);
}

/* ena_shutdown - Device Shutdown Routine
* @pdev: PCI device information struct
*
* ena_shutdown is called by the PCI subsystem to alert the driver that
* a shutdown/reboot (or kexec) is happening and device must be disabled.
*/

static void ena_shutdown(struct pci_dev *pdev)
{
__ena_shutoff(pdev, true);
}

#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
* @pdev: PCI device information struct
@ -4420,6 +4461,7 @@ static struct pci_driver ena_pci_driver = {
.id_table = ena_pci_tbl,
.probe = ena_probe,
.remove = ena_remove,
.shutdown = ena_shutdown,
#ifdef CONFIG_PM
.suspend = ena_suspend,
.resume = ena_resume,
@ -6880,12 +6880,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc)
if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
else
ctx->flags |= BNXT_CTX_FLAG_INITED;

return rc;
}
ctx->flags |= BNXT_CTX_FLAG_INITED;
return 0;
}

@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
pri2cos = &resp2->pri0_cos_queue_id;
for (i = 0; i < 8; i++) {
u8 queue_id = pri2cos[i];
u8 queue_idx;

/* Per port queue IDs start from 0, 10, 20, etc */
queue_idx = queue_id % 10;
if (queue_idx > BNXT_MAX_QUEUE) {
bp->pri2cos_valid = false;
goto qstats_done;
}
for (j = 0; j < bp->max_q; j++) {
if (bp->q_ids[j] == queue_id)
bp->pri2cos[i] = j;
bp->pri2cos_idx[i] = queue_idx;
}
}
bp->pri2cos_valid = 1;
}
qstats_done:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
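The qstats fix above replaces a linear search over firmware queue IDs with direct indexing: per-port queue IDs are allocated in strides of 10 (0, 10, 20, ...), so queue_id % 10 recovers the index within a port, which is then range-checked against BNXT_MAX_QUEUE. A trivial sketch with hypothetical IDs:

#include <stdio.h>

int main(void)
{
	/* hypothetical firmware queue IDs on three consecutive ports */
	unsigned int ids[] = { 3, 13, 27 };

	for (int i = 0; i < 3; i++)
		printf("queue_id %u -> port block %u, per-port index %u\n",
		       ids[i], ids[i] / 10, ids[i] % 10);
	return 0;
}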
@ -11669,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->rx_nr_rings++;
bp->cp_nr_rings++;
}
if (rc) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
return rc;
}

@ -11962,12 +11974,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;

init_err_free:
free_netdev(dev);
@ -1716,7 +1716,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_idx[8];
u8 pri2cos_valid;

u16 hwrm_max_req_len;
@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
struct bnxt *bp = netdev_priv(dev);
struct ieee_ets *my_ets = bp->ieee_ets;
int rc;

ets->ets_cap = bp->max_tc;

if (!my_ets) {
int rc;

if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
return 0;

my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
if (!my_ets)
return 0;
return -ENOMEM;
rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
if (rc)
return 0;
goto error;
rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
if (rc)
return 0;
goto error;

/* cache result */
bp->ieee_ets = my_ets;
}

ets->cbs = my_ets->cbs;
@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
error:
kfree(my_ets);
return rc;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
@ -589,25 +589,25 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_bytes_pri_arr[i].base_off +
bp->pri2cos[i];
bp->pri2cos_idx[i];

buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_rx_pkts_pri_arr[i].base_off +
bp->pri2cos[i];
bp->pri2cos_idx[i];

buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_bytes_pri_arr[i].base_off +
bp->pri2cos[i];
bp->pri2cos_idx[i];

buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
}
for (i = 0; i < 8; i++, j++) {
long n = bnxt_tx_pkts_pri_arr[i].base_off +
bp->pri2cos[i];
bp->pri2cos_idx[i];

buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
}
@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
void __iomem *d)
{
return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
void __iomem *d,
dma_addr_t addr)
@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static void bcmgenet_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 rbuf_chk_ctrl;
bool rx_csum_en;

rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

/* enable rx checksumming */
if (rx_csum_en)
rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
else
rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
priv->desc_rxchk_en = rx_csum_en;

/* If UniMAC forwards CRC, we need to skip over it to get
* a valid CHK bit to be set in the per-packet status word
*/
if (rx_csum_en && priv->crc_fwd_en)
rbuf_chk_ctrl |= RBUF_SKIP_FCS;
else
rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
}

static void bcmgenet_set_tx_csum(struct net_device *dev,
netdev_features_t wanted)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
bool desc_64b_en;
u32 tbuf_ctrl, rbuf_ctrl;

tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);

/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
if (desc_64b_en) {
tbuf_ctrl |= RBUF_64B_EN;
rbuf_ctrl |= RBUF_64B_EN;
} else {
tbuf_ctrl &= ~RBUF_64B_EN;
rbuf_ctrl &= ~RBUF_64B_EN;
}
priv->desc_64b_en = desc_64b_en;

bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
}

static int bcmgenet_set_features(struct net_device *dev,
netdev_features_t features)
{
@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev,
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

bcmgenet_set_tx_csum(dev, features);
bcmgenet_set_rx_csum(dev, features);

clk_disable_unprepare(priv->clk);

return ret;
@ -1475,8 +1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
struct sk_buff *skb)
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
struct sk_buff *skb)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
*/
GENET_CB(skb)->bytes_sent = skb->len;

/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
skb = bcmgenet_put_tx_csum(dev, skb);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
}
/* add the Transmit Status Block */
skb = bcmgenet_add_tsb(dev, skb);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
}

for (i = 0; i <= nr_frags; i++) {
@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,

while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
struct status_64 *status;
__be16 rx_csum;

cb = &priv->rx_cbs[ring->read_ptr];
skb = bcmgenet_rx_refill(priv, cb);

@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
goto next;
}

if (!priv->desc_64b_en) {
dma_length_status =
dmadesc_get_length_status(priv, cb->bd_addr);
} else {
struct status_64 *status;
__be16 rx_csum;

status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
if (priv->desc_rxchk_en) {
skb->csum = (__force __wsum)ntohs(rx_csum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
skb->csum = (__force __wsum)ntohs(rx_csum);
skb->ip_summed = CHECKSUM_COMPLETE;
}

/* DMA flags and length are still valid no matter how
@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
} /* error packet */

skb_put(skb, len);
if (priv->desc_64b_en) {
skb_pull(skb, 64);
len -= 64;
}

/* remove hardware 2bytes added for IP alignment */
skb_pull(skb, 2);
len -= 2;
/* remove RSB and hardware 2bytes added for IP alignment */
skb_pull(skb, 66);
len -= 66;

if (priv->crc_fwd_en) {
skb_trim(skb, len - ETH_FCS_LEN);
@ -1965,6 +1890,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
u32 reg;

reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
return;
if (enable)
reg |= mask;
else
@ -1984,11 +1911,9 @@ static void reset_umac(struct bcmgenet_priv *priv)
bcmgenet_rbuf_ctrl_set(priv, 0);
udelay(10);

/* disable MAC while updating its registers */
bcmgenet_umac_writel(priv, 0, UMAC_CMD);

/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
/* issue soft reset and disable MAC while updating its registers */
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
}

static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv)

bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

/* init rx registers, enable ip header optimization */
/* init tx registers, enable TSB */
reg = bcmgenet_tbuf_ctrl_get(priv);
reg |= TBUF_64B_EN;
bcmgenet_tbuf_ctrl_set(priv, reg);

/* init rx registers, enable ip header optimization and RSB */
reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
reg |= RBUF_ALIGN_2B;
reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

/* enable rx checksumming */
reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
/* If UniMAC forwards CRC, we need to skip over it to get
* a valid CHK bit to be set in the per-packet status word
*/
if (priv->crc_fwd_en)
reg |= RBUF_SKIP_FCS;
else
reg &= ~RBUF_SKIP_FCS;
bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);

if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
@ -273,6 +273,7 @@ struct bcmgenet_mib_counters {
#define RBUF_FLTR_LEN_SHIFT 8

#define TBUF_CTRL 0x00
#define TBUF_64B_EN (1 << 0)
#define TBUF_BP_MC 0x0C
#define TBUF_ENERGY_CTRL 0x14
#define TBUF_EEE_EN (1 << 0)
@ -662,8 +663,6 @@ struct bcmgenet_priv {
unsigned int irq0_stat;

/* HW descriptors/checksum variables */
bool desc_64b_en;
bool desc_rxchk_en;
bool crc_fwd_en;

u32 dma_max_burst_length;
@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}

/* disable RX */
/* Can't suspend with WoL if MAC is still in reset */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;

/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
mdelay(10);
@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_device *dev)
CMD_HD_EN |
CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
if (reg & CMD_SW_RESET) {
reg &= ~CMD_SW_RESET;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
udelay(2);
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
} else {
/* done if nothing has changed */
@ -181,38 +187,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
const char *phy_name = NULL;
u32 id_mode_dis = 0;
u32 port_ctrl;
int bmcr = -1;
int ret;
u32 reg;

/* MAC clocking workaround during reset of umac state machines */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET) {
/* An MII PHY must be isolated to prevent TXC contention */
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ret = phy_read(phydev, MII_BMCR);
if (ret >= 0) {
bmcr = ret;
ret = phy_write(phydev, MII_BMCR,
bmcr | BMCR_ISOLATE);
}
if (ret) {
netdev_err(dev, "failed to isolate PHY\n");
return ret;
}
}
/* Switch MAC clocking to RGMII generated clock */
bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
/* Ensure 5 clks with Rx disabled
* followed by 5 clks with Reset asserted
*/
udelay(4);
reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
/* Ensure 5 more clocks before Rx is enabled */
udelay(2);
}

switch (priv->phy_interface) {
case PHY_INTERFACE_MODE_INTERNAL:
phy_name = "internal PHY";
@ -282,10 +258,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)

bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);

/* Restore the MII PHY after isolation */
if (bmcr >= 0)
phy_write(phydev, MII_BMCR, bmcr);

priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter)
adapter->tids.tid_tab[i];

if (f && (f->valid || f->pending))
cxgb4_del_filter(dev, i, &f->fs);
cxgb4_del_filter(dev, f->tid, &f->fs);
}

sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter)
f = (struct filter_entry *)adapter->tids.tid_tab[i];

if (f && (f->valid || f->pending))
cxgb4_del_filter(dev, i, &f->fs);
cxgb4_del_filter(dev, f->tid, &f->fs);
}
}
}
@ -246,6 +246,9 @@ static int cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
c.u.ts.sign = (delta < 0) ? 1 : 0;
if (delta < 0)
delta = -delta;
c.u.ts.tm = cpu_to_be64(delta);

err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
int maxreclaim)
{
unsigned int reclaimed, hw_cidx;
struct sge_txq *q = &eq->q;
unsigned int reclaimed;
int hw_in_use;

if (!q->in_use || !__netif_tx_trylock(eq->txq))
return 0;
@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
/* Reclaim pending completed TX Descriptors. */
reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);

hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_in_use = q->pidx - hw_cidx;
if (hw_in_use < 0)
hw_in_use += q->size;

/* If the TX Queue is currently stopped and there's now more than half
* the queue available, restart it. Otherwise bail out since the rest
* of what we want do here is with the possibility of shipping any
* currently buffered Coalesced TX Work Request.
*/
if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
netif_tx_wake_queue(eq->txq);
eq->q.restarts++;
}
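The restart condition above now keys off how many descriptors the hardware still owns (software producer index minus the hardware consumer index, corrected for wraparound) instead of software-visible free space, so a stopped queue is only woken once the hardware has genuinely drained below half capacity. A minimal sketch of the wraparound-safe occupancy computation (ring size and indices hypothetical):

#include <stdio.h>

/* In-use count of a circular ring: pidx - cidx, corrected for wraparound.
 * Mirrors the hw_in_use computation in the hunk above. */
static unsigned int ring_in_use(unsigned int pidx, unsigned int cidx,
				unsigned int size)
{
	int in_use = (int)pidx - (int)cidx;

	if (in_use < 0)
		in_use += size;
	return (unsigned int)in_use;
}

int main(void)
{
	/* hypothetical 1024-entry ring where the producer has wrapped */
	printf("%u\n", ring_in_use(10, 1000, 1024)); /* prints 34 */
	return 0;
}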
@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);

/* If we're using the SGE Doorbell Queue Timer facility, we
* don't need to ask the Firmware to send us Egress Queue CIDX
* Updates: the Hardware will do this automatically. And
* since we send the Ingress Queue CIDX Updates to the
* corresponding Ethernet Response Queue, we'll get them very
* quickly.
*/
if (!q->dbqt)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}

wr = (void *)&q->q.desc[q->q.pidx];
@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);

/* If we're using the SGE Doorbell Queue Timer facility, we
* don't need to ask the Firmware to send us Egress Queue CIDX
* Updates: the Hardware will do this automatically. And
* since we send the Ingress Queue CIDX Updates to the
* corresponding Ethernet Response Queue, we'll get them very
* quickly.
*/
if (!txq->dbqt)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}

/* Start filling in our Work Request. Note that we do _not_ handle
@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}

txq = &s->ethtxq[pi->first_qset + rspq->idx];

/* We've got the Hardware Consumer Index Update in the Egress Update
* message. If we're using the SGE Doorbell Queue Timer mechanism,
* these Egress Update messages will be our sole CIDX Updates we get
* since we don't want to chew up PCIe bandwidth for both Ingress
* Messages and Status Page writes. However, The code which manages
* reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
* stored in the Status Page at the end of the TX Queue. It's easiest
* to simply copy the CIDX Update value from the Egress Update message
* to the Status Page. Also note that no Endian issues need to be
* considered here since both are Big Endian and we're just copying
* bytes consistently ...
*/
if (txq->dbqt) {
struct cpl_sge_egr_update *egr;

egr = (struct cpl_sge_egr_update *)rsp;
WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
}

t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
@ -782,7 +782,7 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
/* Set full duplex */
tmp &= ~IF_MODE_HD;

if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
if (phy_interface_mode_is_rgmii(memac->phy_if)) {
/* Configure RGMII in manual mode */
tmp &= ~IF_MODE_RGMII_AUTO;
tmp &= ~IF_MODE_RGMII_SP_MASK;
@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,

spin_unlock_bh(&cmdq->cmdq_lock);

if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
if (!wait_for_completion_timeout(&done,
msecs_to_jiffies(CMDQ_TIMEOUT))) {
spin_lock_bh(&cmdq->cmdq_lock);

if (cmdq->errcode[curr_prod_idx] == &errcode)
@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
return -EBUSY;

dma_rmb();

errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);

cmdq_sync_cmd_handler(cmdq, ci, errcode);
@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
return -EFAULT;
}

static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
{
struct hinic_cmd_io_status cmd_io_status;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
unsigned long end;
u16 out_size;
int err;

if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
dev_err(&pdev->dev, "Unsupported PCI Function type\n");
return -EINVAL;
}

pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);

end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
do {
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
HINIC_COMM_CMD_IO_STATUS_GET,
&cmd_io_status, sizeof(cmd_io_status),
&cmd_io_status, &out_size,
HINIC_MGMT_MSG_SYNC);
if ((err) || (out_size != sizeof(cmd_io_status))) {
dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
err);
return err;
}

if (cmd_io_status.status == IO_STOPPED) {
dev_info(&pdev->dev, "IO stopped\n");
return 0;
}

msleep(20);
} while (time_before(jiffies, end));

dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
return -ETIMEDOUT;
}

/**
* clear_io_resource - set the IO resources as not active in the NIC
* @hwdev: the NIC HW device
@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
return -EINVAL;
}

err = wait_for_io_stopped(hwdev);
if (err) {
dev_err(&pdev->dev, "IO has not stopped yet\n");
return err;
}
/* sleep 100ms to wait for firmware stopping I/O */
msleep(100);

cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
|
@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
|
||||
* eq_update_ci - update the HW cons idx of event queue
|
||||
* @eq: the event queue to update the cons idx for
|
||||
**/
|
||||
static void eq_update_ci(struct hinic_eq *eq)
|
||||
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
|
||||
{
|
||||
u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
|
||||
|
||||
@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq)
|
||||
|
||||
val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
|
||||
HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
|
||||
HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
|
||||
HINIC_EQ_CI_SET(arm_state, INT_ARMED);
|
||||
|
||||
val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
|
||||
|
||||
@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
|
||||
if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
|
||||
break;
|
||||
|
||||
dma_rmb();
|
||||
|
||||
event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
|
||||
if (event >= HINIC_MAX_AEQ_EVENTS) {
|
||||
dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
|
||||
@ -347,7 +349,7 @@ static void eq_irq_handler(void *data)
|
||||
else if (eq->type == HINIC_CEQ)
|
||||
ceq_irq_handler(eq);
|
||||
|
||||
eq_update_ci(eq);
|
||||
eq_update_ci(eq, EQ_ARMED);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
|
||||
}
|
||||
|
||||
set_eq_ctrls(eq);
|
||||
eq_update_ci(eq);
|
||||
eq_update_ci(eq, EQ_ARMED);
|
||||
|
||||
err = alloc_eq_pages(eq);
|
||||
if (err) {
|
||||
@ -752,18 +754,28 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
|
||||
**/
|
||||
static void remove_eq(struct hinic_eq *eq)
|
||||
{
|
||||
struct msix_entry *entry = &eq->msix_entry;
|
||||
|
||||
free_irq(entry->vector, eq);
|
||||
hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
|
||||
HINIC_MSIX_DISABLE);
|
||||
free_irq(eq->msix_entry.vector, eq);
|
||||
|
||||
if (eq->type == HINIC_AEQ) {
|
||||
struct hinic_eq_work *aeq_work = &eq->aeq_work;
|
||||
|
||||
cancel_work_sync(&aeq_work->work);
|
||||
/* clear aeq_len to avoid hw access host memory */
|
||||
hinic_hwif_write_reg(eq->hwif,
|
||||
HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
|
||||
} else if (eq->type == HINIC_CEQ) {
|
||||
tasklet_kill(&eq->ceq_tasklet);
|
||||
/* clear ceq_len to avoid hw access host memory */
|
||||
hinic_hwif_write_reg(eq->hwif,
|
||||
HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
|
||||
}
|
||||
|
||||
/* update cons_idx to avoid invalid interrupt */
|
||||
eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
|
||||
eq_update_ci(eq, EQ_NOT_ARMED);
|
||||
|
||||
free_eq_pages(eq);
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@

#define MSG_NOT_RESP 0xFFFF

#define MGMT_MSG_TIMEOUT 1000
#define MGMT_MSG_TIMEOUT 5000

#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}

if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
if (!wait_for_completion_timeout(recv_done,
msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
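Both hinic changes wrap the timeout constants in msecs_to_jiffies() because wait_for_completion_timeout() takes its timeout in jiffies, not milliseconds, so passing a raw millisecond count makes the effective timeout scale with CONFIG_HZ. A userspace illustration of the unit mismatch (the HZ value is hypothetical):

#include <stdio.h>

#define HZ 250			/* hypothetical kernel tick rate */
#define MGMT_MSG_TIMEOUT 5000	/* intended timeout in milliseconds */

/* userspace stand-in for the kernel's msecs_to_jiffies() */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (unsigned long)ms * HZ / 1000;
}

int main(void)
{
	/* correct: convert before waiting */
	printf("correct: %lu jiffies (5 s)\n", msecs_to_jiffies(MGMT_MSG_TIMEOUT));
	/* buggy: 5000 "jiffies" at HZ=250 is really 20 seconds */
	printf("buggy:   %u jiffies (= %u ms)\n",
	       MGMT_MSG_TIMEOUT, MGMT_MSG_TIMEOUT * 1000 / HZ);
	return 0;
}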
@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
if (!rq_wqe)
break;

/* make sure we read rx_done before packet length */
dma_rmb();

cqe = rq->cqe[ci];
status = be32_to_cpu(cqe->status);
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
@ -45,7 +45,7 @@

#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN 17
#define MIN_SKB_LEN 32

#define MAX_PAYLOAD_OFFSET 221
#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
do {
hw_ci = HW_CONS_IDX(sq) & wq->mask;

dma_rmb();

/* Reading a WQEBB to get real WQE size and consumer index. */
sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
if ((!sq_wqe) ||
@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
writel(MVMDIO_ERR_INT_SMI_DONE,
dev->regs + MVMDIO_ERR_INT_MASK);

} else if (dev->err_interrupt < 0) {
ret = dev->err_interrupt;
} else if (dev->err_interrupt == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_mdio;
}
@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
port->cause_rx_tx;

rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
@ -906,59 +906,59 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
int len = 0;

mlx4_err(dev, "%s", str);
len += snprintf(buf + len, BUF_SIZE - len,
"port = %d prio = 0x%x qp = 0x%x ",
rule->port, rule->priority, rule->qpn);
len += scnprintf(buf + len, BUF_SIZE - len,
"port = %d prio = 0x%x qp = 0x%x ",
rule->port, rule->priority, rule->qpn);

list_for_each_entry(cur, &rule->list, list) {
switch (cur->id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
len += snprintf(buf + len, BUF_SIZE - len,
"dmac = %pM ", &cur->eth.dst_mac);
len += scnprintf(buf + len, BUF_SIZE - len,
"dmac = %pM ", &cur->eth.dst_mac);
if (cur->eth.ether_type)
len += snprintf(buf + len, BUF_SIZE - len,
"ethertype = 0x%x ",
be16_to_cpu(cur->eth.ether_type));
len += scnprintf(buf + len, BUF_SIZE - len,
"ethertype = 0x%x ",
be16_to_cpu(cur->eth.ether_type));
if (cur->eth.vlan_id)
len += snprintf(buf + len, BUF_SIZE - len,
"vlan-id = %d ",
be16_to_cpu(cur->eth.vlan_id));
len += scnprintf(buf + len, BUF_SIZE - len,
"vlan-id = %d ",
be16_to_cpu(cur->eth.vlan_id));
break;

case MLX4_NET_TRANS_RULE_ID_IPV4:
if (cur->ipv4.src_ip)
len += snprintf(buf + len, BUF_SIZE - len,
"src-ip = %pI4 ",
&cur->ipv4.src_ip);
len += scnprintf(buf + len, BUF_SIZE - len,
"src-ip = %pI4 ",
&cur->ipv4.src_ip);
if (cur->ipv4.dst_ip)
len += snprintf(buf + len, BUF_SIZE - len,
"dst-ip = %pI4 ",
&cur->ipv4.dst_ip);
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-ip = %pI4 ",
&cur->ipv4.dst_ip);
break;

case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
if (cur->tcp_udp.src_port)
len += snprintf(buf + len, BUF_SIZE - len,
"src-port = %d ",
be16_to_cpu(cur->tcp_udp.src_port));
len += scnprintf(buf + len, BUF_SIZE - len,
"src-port = %d ",
be16_to_cpu(cur->tcp_udp.src_port));
if (cur->tcp_udp.dst_port)
len += snprintf(buf + len, BUF_SIZE - len,
"dst-port = %d ",
be16_to_cpu(cur->tcp_udp.dst_port));
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-port = %d ",
be16_to_cpu(cur->tcp_udp.dst_port));
break;

case MLX4_NET_TRANS_RULE_ID_IB:
len += snprintf(buf + len, BUF_SIZE - len,
"dst-gid = %pI6\n", cur->ib.dst_gid);
len += snprintf(buf + len, BUF_SIZE - len,
"dst-gid-mask = %pI6\n",
cur->ib.dst_gid_msk);
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-gid = %pI6\n", cur->ib.dst_gid);
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-gid-mask = %pI6\n",
cur->ib.dst_gid_msk);
break;

case MLX4_NET_TRANS_RULE_ID_VXLAN:
len += snprintf(buf + len, BUF_SIZE - len,
"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
len += scnprintf(buf + len, BUF_SIZE - len,
"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
break;
case MLX4_NET_TRANS_RULE_ID_IPV6:
break;
@ -967,7 +967,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
break;
}
}
len += snprintf(buf + len, BUF_SIZE - len, "\n");
len += scnprintf(buf + len, BUF_SIZE - len, "\n");
mlx4_err(dev, "%s", buf);

if (len >= BUF_SIZE)
@ -371,6 +371,7 @@ enum {

struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;

/* Auxiliary data for different opcodes. */
union {
@ -1059,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
@ -10,8 +10,7 @@

static inline bool cqe_syndrome_needs_recover(u8 syndrome)
{
return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
}
@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;

mlx5e_reset_icosq_cc_pc(icosq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rx_in_progress_descs(rq);
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
mlx5e_activate_rq(rq);
@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
mlx5_wq_ll_reset(&rq->mpwqe.wq);
else
rq->mpwqe.actual_wq_head = 0;
} else {
mlx5_wq_cyc_reset(&rq->wqe.wq);
}
}

/* SW parser related functions */
@ -38,8 +38,8 @@ enum {

enum {
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
};

struct mlx5e_ktls_offload_context_tx {
@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
* this packet was already acknowledged and its record info
* was released.
*/
ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

if (unlikely(tls_record_is_start_marker(record))) {
ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
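The kTLS fix above is sequence-space off-by-one arithmetic: a packet occupying [tcp_seq, tcp_seq + datalen) ends before a record only if its last byte, tcp_seq + datalen - 1, precedes the record's start; testing tcp_seq + datalen misclassifies a packet that ends exactly at the record boundary. A sketch using a kernel-style wraparound-safe comparison:

#include <stdio.h>
#include <stdint.h>

/* true if seq1 is before seq2 in 32-bit sequence space, wraparound-safe */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t tcp_seq = 1000, datalen = 100, record_start = 1100;

	/* packet covers bytes [1000, 1099]; the record starts at 1100,
	 * so the packet does end before the record */
	printf("old test: %d\n", before(tcp_seq + datalen, record_start));     /* 0 */
	printf("fixed:    %d\n", before(tcp_seq + datalen - 1, record_start)); /* 1 */
	return 0;
}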
@ -813,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
return -ETIMEDOUT;
}

void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq;
u16 head;
int i;

if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return;

wq = &rq->mpwqe.wq;
head = wq->head;

/* Outstanding UMR WQEs (in progress) start at wq->head */
for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
rq->dealloc_wqe(rq, head);
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}

rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
}

void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
@ -820,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)

if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
u16 head = wq->head;
int i;

/* Outstanding UMR WQEs (in progress) start at wq->head */
for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
rq->dealloc_wqe(rq, head);
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
mlx5e_free_rx_in_progress_descs(rq);

while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
wi->opcode = MLX5_OPCODE_NOP;
wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
}
@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);

sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
sq->db.ico_wqe[pi].umr.rq = rq;
sq->pc += MLX5E_UMR_WQEBBS;

@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)

ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.ico_wqe[ci];
sqcc += wi->num_wqebbs;

if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
}

if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
sqcc += MLX5E_UMR_WQEBBS;
if (likely(wi->opcode == MLX5_OPCODE_UMR))
wi->umr.rq->mpwqe.umr_completed++;
} else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
sqcc++;
} else {
else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OPCODE in ICOSQ WQE info: 0x%x\n",
wi->opcode);
}

} while (!last_wqe);
@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
continue;

if (f->field_bsize == 32) {
mask_be32 = *(__be32 *)&mask;
mask_be32 = (__be32)mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (f->field_bsize == 16) {
mask_be16 = *(__be16 *)&mask;
mask_be32 = (__be32)mask;
mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
|
||||
|
||||
|
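The pedit change just above swaps the pointer cast *(__be32 *)&mask for a plain truncating cast. Since mask is an unsigned long, the old form dereferences the first four bytes of the variable, which on a 64-bit big-endian kernel are the all-zero high half rather than the field being masked; (__be32)mask keeps the low 32 bits on every architecture. A small userspace sketch of what the first four bytes of a big-endian 64-bit value hold (an assumption about the failure mode the fix targets, not code from the driver):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* Byte image of the 64-bit value 0x00000000aabbccdd as a
	 * big-endian machine stores it: the low word sits in bytes 4-7.
	 */
	unsigned char be64[8] = { 0, 0, 0, 0, 0xaa, 0xbb, 0xcc, 0xdd };
	uint32_t first_word;

	memcpy(&first_word, be64, 4); /* what *(__be32 *)&mask dereferences */
	printf("first four bytes: 0x%08x (the zero high half)\n",
	       (unsigned int)first_word);
	return 0;
}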
@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

@@ -615,8 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
break;

if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
if (ldev->nb.notifier_call) {
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
ldev->nb.notifier_call = NULL;
}
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);

@@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,

action->rewrite.data = (void *)ops;
action->rewrite.num_of_actions = i;
action->rewrite.chunk->byte_size = i * sizeof(*ops);

ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {

@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
int ret;

send_info.write.addr = (uintptr_t)action->rewrite.data;
send_info.write.length = action->rewrite.chunk->byte_size;
send_info.write.length = action->rewrite.num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
send_info.remote_addr = action->rewrite.chunk->mr_addr;
send_info.rkey = action->rewrite.chunk->rkey;

@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
req->cap_mask1_perm);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
kfree(in);
@@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
mbox->mapaddr);
}

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id,
u32 *p_sys_status)
{
unsigned long end;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
int err;
u32 val;

mlxsw_reg_mrsr_pack(mrsr_pl);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
return 0;
}

/* We must wait for the HW to become responsive once again. */
/* We must wait for the HW to become responsive. */
msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
return 0;
cond_resched();
} while (time_before(jiffies, end));

*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

return -EBUSY;
}

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
u32 sys_status;
int err;

err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
if (err) {
dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
sys_status);
return err;
}

mlxsw_reg_mrsr_pack(mrsr_pl);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;

err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
if (err) {
dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
sys_status);
return err;
}

return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
int err;

@@ -3572,7 +3572,7 @@ MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
 * When in bytes mode, value is specified in units of 1000bps.
 * Access: RW
 */
MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31);

/* reg_qeec_de
 * DWRR configuration enable. Enables configuration of the dwrr and
@@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
(level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
if ((mask & VXGE_DEBUG_MASK) == mask) \
printk(fmt "\n", __VA_ARGS__); \
printk(fmt "\n", ##__VA_ARGS__); \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)

@@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);

#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif

#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif

#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif
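All of the vxge macro changes above add ## in front of __VA_ARGS__. With plain __VA_ARGS__, invoking one of these debug macros with a format string and no further arguments leaves a trailing comma in the expansion and breaks the build; the GNU ##__VA_ARGS__ extension, which the kernel relies on, deletes that comma when the variadic list is empty. A standalone illustration, assuming GCC or Clang:

#include <stdio.h>

/* Without ##, dbg("hi") would expand to printf("hi" "\n", ) - a syntax
 * error caused by the dangling comma.
 */
#define dbg(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
	dbg("no extra arguments");	/* ## swallows the trailing comma */
	dbg("answer: %d", 42);
	return 0;
}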
@@ -616,7 +616,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
if (bar->iomem) {
int pf;

msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
atomic_inc(&bar->refcnt);
bars_free--;

@@ -661,7 +661,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)

/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
bar = &nfp->bar[1];
msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
atomic_inc(&bar->refcnt);
bars_free--;

@@ -680,8 +680,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
bar->iomem = ioremap(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
msg += snprintf(msg, end - msg,
"0.%d: Explicit%d, ", 4 + i, i);
msg += scnprintf(msg, end - msg,
"0.%d: Explicit%d, ", 4 + i, i);
atomic_inc(&bar->refcnt);
bars_free--;
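This nfp hunk, like the ionic, sfc and netdevsim hunks further down, converts accumulated snprintf() calls to scnprintf(). snprintf() returns the length the output would have had without truncation, so code that advances a write pointer by its return value can step past the end of the buffer once truncation starts; scnprintf() returns the number of characters actually written. A minimal userspace sketch of the difference - scnprintf() itself is kernel-only, so my_scnprintf() below is a stand-in with the same contract:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the kernel's scnprintf(): never reports more than what
 * actually landed in the buffer (at most size - 1 characters).
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "0123456789");
	printf("snprintf returned %d for a %zu-byte buffer\n", n, sizeof(buf));

	n = my_scnprintf(buf, sizeof(buf), "0123456789");
	printf("scnprintf-style helper returned %d\n", n);
	return 0;
}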
@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. */

#ifndef _IONIC_IF_H_

@@ -948,18 +948,18 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
lif->rx_mode, rx_mode);
i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
lif->rx_mode, rx_mode);
if (rx_mode & IONIC_RX_MODE_F_UNICAST)
i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
if (rx_mode & IONIC_RX_MODE_F_PROMISC)
i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

err = ionic_adminq_post_wait(lif, &ctx);

@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
/* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. */

#ifndef IONIC_REGS_H
@@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
/* fall through */
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
flags = PCI_IRQ_LEGACY;
break;
default:
@@ -212,12 +212,14 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
 * progress on a NIC at any one time. So no need for locking.
 */
for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(hdr[i].u32[0]));
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x",
le32_to_cpu(hdr[i].u32[0]));

for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(inbuf[i].u32[0]));
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x",
le32_to_cpu(inbuf[i].u32[0]));

netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
}
@@ -302,15 +304,15 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
 */
for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(hdr.u32[0]));
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(hdr.u32[0]));
}

for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
efx->type->mcdi_read_response(efx, &hdr,
mcdi->resp_hdr_len + (i * 4), 4);
bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(hdr.u32[0]));
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
" %08x", le32_to_cpu(hdr.u32[0]));
}

netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
@@ -1417,9 +1419,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
}

ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
offset = snprintf(buf, len, "%u.%u.%u.%u",
le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
offset = scnprintf(buf, len, "%u.%u.%u.%u",
le16_to_cpu(ver_words[0]),
le16_to_cpu(ver_words[1]),
le16_to_cpu(ver_words[2]),
le16_to_cpu(ver_words[3]));

/* EF10 may have multiple datapath firmware variants within a
 * single version. Report which variants are running.
@@ -1427,9 +1431,9 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
struct efx_ef10_nic_data *nic_data = efx->nic_data;

offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
nic_data->rx_dpcpu_fw_id,
nic_data->tx_dpcpu_fw_id);
offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
nic_data->rx_dpcpu_fw_id,
nic_data->tx_dpcpu_fw_id);

/* It's theoretically possible for the string to exceed 31
 * characters, though in practice the first three version
@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)

ret = rk_gmac_clk_init(plat_dat);
if (ret)
return ret;
goto err_remove_config_dt;

ret = rk_gmac_powerup(plat_dat->bsp_priv);
if (ret)

@@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
 * In case the wake up interrupt is not passed from the platform
 * so the driver will continue to use the mac irq (ndev->irq)
 */
stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
stmmac_res->wol_irq =
platform_get_irq_byname_optional(pdev, "eth_wake_irq");
if (stmmac_res->wol_irq < 0) {
if (stmmac_res->wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
stmmac_res->wol_irq = stmmac_res->irq;
}

stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
if (stmmac_res->lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
stmmac_res->lpi_irq =
platform_get_irq_byname_optional(pdev, "eth_lpi");
if (stmmac_res->lpi_irq < 0) {
if (stmmac_res->lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
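The stmmac conversion above moves to platform_get_irq_byname_optional(), which behaves like platform_get_irq_byname() but stays quiet when the interrupt simply is not described; the caller still has to tell "absent, fall back" apart from -EPROBE_DEFER. A sketch of the resulting probe-time pattern - kernel context assumed, and example_get_wol_irq() is a hypothetical helper, not a function from the driver:

/* Sketch: optional wake-on-LAN IRQ with fallback to the main MAC IRQ. */
static int example_get_wol_irq(struct platform_device *pdev,
			       struct stmmac_resources *res)
{
	res->wol_irq = platform_get_irq_byname_optional(pdev, "eth_wake_irq");
	if (res->wol_irq < 0) {
		if (res->wol_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* IRQ provider not ready yet */
		dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
		res->wol_irq = res->irq;	/* fall back to the MAC IRQ */
	}
	return 0;
}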
@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
if (!net_eq(dev_net(geneve->dev), net))
unregister_netdevice_queue(geneve->dev, head);
}

WARN_ON_ONCE(!list_empty(&gn->sock_list));
}

static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
/* unregister the devices gathered above */
unregister_netdevice_many(&list);
rtnl_unlock();

list_for_each_entry(net, net_list, exit_list) {
const struct geneve_net *gn = net_generic(net, geneve_net_id);

WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
}

static struct pernet_operations geneve_net_ops = {

@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
}

while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
skb->tc_redirected = 0;
skb->redirected = 0;
skb->tc_skip_classify = 1;

u64_stats_update_begin(&txp->tsync);
@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
rcu_read_unlock();
skb->skb_iif = txp->dev->ifindex;

if (!skb->tc_from_ingress) {
if (!skb->from_ingress) {
dev_queue_xmit(skb);
} else {
skb_pull_rcsum(skb, skb->mac_len);
@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
txp->rx_bytes += skb->len;
u64_stats_update_end(&txp->rsync);

if (!skb->tc_redirected || !skb->skb_iif) {
if (!skb->redirected || !skb->skb_iif) {
dev_kfree_skb(skb);
dev->stats.rx_dropped++;
return NETDEV_TX_OK;
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

@@ -3665,6 +3666,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_ETHER)
return -EINVAL;

dev->priv_flags |= IFF_MACSEC;
@@ -29,9 +29,9 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
return -ENOMEM;

p = buf;
p += snprintf(p, bufsize - (p - buf),
"SA count=%u tx=%u\n",
ipsec->count, ipsec->tx);
p += scnprintf(p, bufsize - (p - buf),
"SA count=%u tx=%u\n",
ipsec->count, ipsec->tx);

for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
struct nsim_sa *sap = &ipsec->sa[i];
@@ -39,18 +39,18 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
if (!sap->used)
continue;

p += snprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
p += snprintf(p, bufsize - (p - buf),
"sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
i, be32_to_cpu(sap->xs->id.spi),
sap->xs->id.proto, sap->salt, sap->crypt);
p += snprintf(p, bufsize - (p - buf),
"sa[%i] key=0x%08x %08x %08x %08x\n",
i, sap->key[0], sap->key[1],
sap->key[2], sap->key[3]);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
i, be32_to_cpu(sap->xs->id.spi),
sap->xs->id.proto, sap->salt, sap->crypt);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] key=0x%08x %08x %08x %08x\n",
i, sap->key[0], sap->key[1],
sap->key[2], sap->key[3]);
}

len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
@@ -28,7 +28,8 @@
#define DP83867_CTRL 0x1f

/* Extended Registers */
#define DP83867_CFG4 0x0031
#define DP83867_FLD_THR_CFG 0x002e
#define DP83867_CFG4 0x0031
#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
@@ -91,6 +92,7 @@
#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)

/* PHY CTRL bits */
#define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT 14
@@ -125,6 +127,9 @@
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)

/* FLD_THR_CFG */
#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7

enum {
DP83867_PORT_MIRROING_KEEP,
DP83867_PORT_MIRROING_EN,
@@ -476,6 +481,20 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
BIT(7));

bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
/* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
 * be set to 0x2. This may causes the PHY link to be unstable -
 * the default value 0x1 need to be restored.
 */
ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
DP83867_FLD_THR_CFG,
DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
0x1);
if (ret)
return ret;
}

if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}

priv->clk = devm_clk_get(&pdev->dev, NULL);
if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
else
priv->clk = NULL;

ret = clk_prepare_enable(priv->clk);
if (ret)

@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
static int mdio_mux_iproc_resume(struct device *dev)
{
struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
int rc;

clk_prepare_enable(md->core_clk);
rc = clk_prepare_enable(md->core_clk);
if (rc) {
dev_err(md->dev, "failed to enable core clk\n");
return rc;
}
mdio_mux_iproc_config(md);

return 0;
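The unimac change above works because devm_clk_get_optional() returns NULL - a valid, no-op clock - when no clock is described, while still propagating real errors such as -EPROBE_DEFER; the iproc hunk adds the missing check on clk_prepare_enable(). A sketch of the combined pattern, kernel context assumed (struct example_priv is a placeholder for the driver's own state, not a real type):

static int example_enable_optional_clk(struct platform_device *pdev,
				       struct example_priv *priv)
{
	int ret;

	priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);	/* covers -EPROBE_DEFER too */

	ret = clk_prepare_enable(priv->clk);	/* a NULL clk is a no-op */
	if (ret)
		dev_err(&pdev->dev, "failed to enable core clk\n");
	return ret;
}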
@@ -572,13 +572,15 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
 * the sfp_bus structure, incrementing its reference count. This must
 * be put via sfp_bus_put() when done.
 *
 * Returns: on success, a pointer to the sfp_bus structure,
 * %NULL if no SFP is specified,
 * on failure, an error pointer value:
 * corresponding to the errors detailed for
 * fwnode_property_get_reference_args().
 * %-ENOMEM if we failed to allocate the bus.
 * an error from the upstream's connect_phy() method.
 * Returns:
 * - on success, a pointer to the sfp_bus structure,
 * - %NULL if no SFP is specified,
 * - on failure, an error pointer value:
 *
 * - corresponding to the errors detailed for
 *   fwnode_property_get_reference_args().
 * - %-ENOMEM if we failed to allocate the bus.
 * - an error from the upstream's connect_phy() method.
 */
struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
{
@@ -612,13 +614,15 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
 * the SFP bus using sfp_register_upstream(). This takes a reference on the
 * bus, so it is safe to put the bus after this call.
 *
 * Returns: on success, a pointer to the sfp_bus structure,
 * %NULL if no SFP is specified,
 * on failure, an error pointer value:
 * corresponding to the errors detailed for
 * fwnode_property_get_reference_args().
 * %-ENOMEM if we failed to allocate the bus.
 * an error from the upstream's connect_phy() method.
 * Returns:
 * - on success, a pointer to the sfp_bus structure,
 * - %NULL if no SFP is specified,
 * - on failure, an error pointer value:
 *
 * - corresponding to the errors detailed for
 *   fwnode_property_get_reference_args().
 * - %-ENOMEM if we failed to allocate the bus.
 * - an error from the upstream's connect_phy() method.
 */
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops)
@@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd182, 5)},	/* Wistron NeWeb D18 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
{QMI_FIXED_INTF(0x1690, 0x7588, 4)},	/* ASKEY WWHC050 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;

dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;

err = gro_cells_init(&vxlan->gro_cells, dev);
if (err) {
free_percpu(dev->tstats);
return err;
}

return 0;
}

@@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)

vxlan->dev = dev;

gro_cells_init(&vxlan->gro_cells, dev);

for (h = 0; h < FDB_HASH_SIZE; ++h) {
spin_lock_init(&vxlan->hash_lock[h]);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
u32 mtu;
int ret;

if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
if (unlikely(!wg_check_packet_protocol(skb))) {
ret = -EPROTONOSUPPORT;
net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
goto err;

@@ -411,11 +411,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)

peer = wg_peer_create(wg, public_key, preshared_key);
if (IS_ERR(peer)) {
/* Similar to the above, if the key is invalid, we skip
 * it without fanfare, so that services don't need to
 * worry about doing key validation themselves.
 */
ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
ret = PTR_ERR(peer);
peer = NULL;
goto out;
}
@@ -569,7 +565,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
private_key);
list_for_each_entry_safe(peer, temp, &wg->peer_list,
peer_list) {
BUG_ON(!wg_noise_precompute_static_static(peer));
wg_noise_precompute_static_static(peer);
wg_noise_expire_current_peer_keypairs(peer);
}
wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);

@@ -44,32 +44,23 @@ void __init wg_noise_init(void)
}

/* Must hold peer->handshake.static_identity->lock */
bool wg_noise_precompute_static_static(struct wg_peer *peer)
void wg_noise_precompute_static_static(struct wg_peer *peer)
{
bool ret;

down_write(&peer->handshake.lock);
if (peer->handshake.static_identity->has_identity) {
ret = curve25519(
peer->handshake.precomputed_static_static,
if (!peer->handshake.static_identity->has_identity ||
!curve25519(peer->handshake.precomputed_static_static,
peer->handshake.static_identity->static_private,
peer->handshake.remote_static);
} else {
u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };

ret = curve25519(empty, empty, peer->handshake.remote_static);
peer->handshake.remote_static))
memset(peer->handshake.precomputed_static_static, 0,
NOISE_PUBLIC_KEY_LEN);
}
up_write(&peer->handshake.lock);
return ret;
}

bool wg_noise_handshake_init(struct noise_handshake *handshake,
struct noise_static_identity *static_identity,
const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
struct wg_peer *peer)
void wg_noise_handshake_init(struct noise_handshake *handshake,
struct noise_static_identity *static_identity,
const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
struct wg_peer *peer)
{
memset(handshake, 0, sizeof(*handshake));
init_rwsem(&handshake->lock);
@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct noise_handshake *handshake,
NOISE_SYMMETRIC_KEY_LEN);
handshake->static_identity = static_identity;
handshake->state = HANDSHAKE_ZEROED;
return wg_noise_precompute_static_static(peer);
wg_noise_precompute_static_static(peer);
}

static void handshake_zero(struct noise_handshake *handshake)
@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
return true;
}

static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 precomputed[NOISE_PUBLIC_KEY_LEN])
{
static u8 zero_point[NOISE_PUBLIC_KEY_LEN];
if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN)))
return false;
kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN,
NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
chaining_key);
return true;
}

static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
{
struct blake2s_state blake;
@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
NOISE_PUBLIC_KEY_LEN, key, handshake->hash);

/* ss */
kdf(handshake->chaining_key, key, NULL,
handshake->precomputed_static_static, NOISE_HASH_LEN,
NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
handshake->chaining_key);
if (!mix_precomputed_dh(handshake->chaining_key, key,
handshake->precomputed_static_static))
goto out;

/* {t} */
tai64n_now(timestamp);
@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
handshake = &peer->handshake;

/* ss */
kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
chaining_key);
if (!mix_precomputed_dh(chaining_key, key,
handshake->precomputed_static_static))
goto out;

/* {t} */
if (!message_decrypt(t, src->encrypted_timestamp,

@@ -94,11 +94,11 @@ struct noise_handshake {
struct wg_device;

void wg_noise_init(void);
bool wg_noise_handshake_init(struct noise_handshake *handshake,
struct noise_static_identity *static_identity,
const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
struct wg_peer *peer);
void wg_noise_handshake_init(struct noise_handshake *handshake,
struct noise_static_identity *static_identity,
const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
struct wg_peer *peer);
void wg_noise_handshake_clear(struct noise_handshake *handshake);
static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
{
@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
void wg_noise_set_static_identity_private_key(
struct noise_static_identity *static_identity,
const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
bool wg_noise_precompute_static_static(struct wg_peer *peer);
void wg_noise_precompute_static_static(struct wg_peer *peer);

bool
wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
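The noise.c rework above replaces the boolean plumbing around wg_noise_precompute_static_static() with a sentinel: a failed or absent precomputation leaves precomputed_static_static all-zero, and mix_precomputed_dh() refuses to derive keys from an all-zero value, so such a handshake can simply never complete. The zero test uses crypto_memneq(), a comparison that does not bail out early and therefore does not leak how many leading bytes matched. A userspace sketch of that idea, with my_memneq() standing in for crypto_memneq():

#include <stddef.h>
#include <stdio.h>

/* Constant-time "do these buffers differ?" check: no early exit. */
static int my_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];
	return diff != 0;
}

int main(void)
{
	unsigned char zero[32] = { 0 }, ss[32] = { 0 };

	if (!my_memneq(ss, zero, sizeof(zero)))
		printf("all-zero shared secret: refuse to derive keys\n");
	return 0;
}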
@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
return ERR_PTR(ret);
peer->device = wg;

if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
public_key, preshared_key, peer)) {
ret = -EKEYREJECTED;
goto err_1;
}
wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
public_key, preshared_key, peer);
if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
goto err_1;
if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,

@@ -66,7 +66,7 @@ struct packet_cb {
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
{
if (skb_network_header(skb) >= skb->head &&
(skb_network_header(skb) + sizeof(struct iphdr)) <=
@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
return 0;
}

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
__be16 real_protocol = wg_examine_packet_protocol(skb);
return real_protocol && skb->protocol == real_protocol;
}

static inline void wg_reset_packet(struct sk_buff *skb)
{
skb_scrub_packet(skb, true);
@@ -94,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb)
skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
skb_reset_tc(skb);
#endif
skb_reset_redirect(skb);
skb->hdr_len = skb_headroom(skb);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);

@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
size_t data_offset, data_len, header_len;
struct udphdr *udp;

if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
if (unlikely(!wg_check_packet_protocol(skb) ||
skb_transport_header(skb) < skb->head ||
(skb_transport_header(skb) + sizeof(struct udphdr)) >
skb_tail_pointer(skb)))
@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
 */
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ~0; /* All levels */
skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
skb->protocol = wg_examine_packet_protocol(skb);
if (skb->protocol == htons(ETH_P_IP)) {
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
wg_packet_consume_data(wg, skb);
break;
default:
net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
wg->dev->name, skb);
WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n");
goto err;
}
return;
@@ -283,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
 * HT size; mac80211 would otherwise pick the HE max (256) by default.
 */
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.tx_with_siso_diversity = true,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

@@ -309,6 +310,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
 * HT size; mac80211 would otherwise pick the HE max (256) by default.
 */
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
.tx_with_siso_diversity = true,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};

@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright (C) 2019 Intel Corporation
 * Copyright (C) 2019 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright (C) 2019 Intel Corporation
 * Copyright (C) 2019 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -491,13 +491,13 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);

void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table)
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table)
{
int ret, i, j;

if (!iwl_sar_geo_support(fwrt))
return;
return -EOPNOTSUPP;

ret = iwl_sar_get_wgds_table(fwrt);
if (ret < 0) {
@@ -505,7 +505,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return;
return -ENOENT;
}

BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
@@ -530,5 +530,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
i, j, value[1], value[2], value[0]);
}
}

return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_init);

@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -171,8 +171,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
struct iwl_host_cmd *cmd);

void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table);

#else /* CONFIG_ACPI */

static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -243,9 +244,10 @@ static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}

static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table)
static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset_group *table)
{
return -ENOENT;
}

#endif /* CONFIG_ACPI */
@@ -8,7 +8,7 @@
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -1409,11 +1409,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
goto out;
}

/*
 * region register have absolute value so apply rxf offset after
 * reading the registers
 */
offs += rxf_data.offset;
offs = rxf_data.offset;

/* Lock fence */
iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
@@ -2494,10 +2490,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
goto out;
}

if (iwl_fw_dbg_stop_restart_recording(fwrt, &params, true)) {
IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n");
goto out;
}
iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);

IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
@@ -2662,14 +2655,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
return 0;
}

int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
{
int ret = 0;

if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
return;

if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
@@ -2686,7 +2679,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
iwl_fw_set_dbg_rec_on(fwrt);
}
#endif

return ret;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);

@@ -239,9 +239,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop);
void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop);

#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)

@@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
kmemdup(pieces->dbg_conf_tlv[i],
pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
if (!pieces->dbg_conf_tlv_len[i])
if (!pieces->dbg_conf_tlv[i])
goto out_free_fw;
}
}
@@ -762,10 +762,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
union geo_tx_power_profiles_cmd cmd;
u16 len;
int ret;

cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
/*
 * It is a valid scenario to not support SAR, or miss wgds table,
 * but in that case there is no need to send the command.
 */
if (ret)
return 0;

cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);

@@ -6,7 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

/* consider our LDPC support in case of HE */
/* consider LDPC support in case of HE */
if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
@@ -191,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
{
u16 supp;
int i, highest_mcs;
u8 nss = sta->rx_nss;

for (i = 0; i < sta->rx_nss; i++) {
if (i == IWL_TLC_NSS_MAX)
break;
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
nss = 1;

for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
if (!highest_mcs)
continue;
@@ -241,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
u16 tx_mcs_160 =
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
u8 nss = sta->rx_nss;

for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) {
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
nss = 1;

for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
@@ -303,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cpu_to_le16(ht_cap->mcs.rx_mask[1]);

/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
|
@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
|
||||
if (!le32_to_cpu(notif->status)) {
|
||||
iwl_mvm_te_check_disconnect(mvm, vif,
|
||||
"Session protection failure");
|
||||
spin_lock_bh(&mvm->time_event_lock);
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
spin_unlock_bh(&mvm->time_event_lock);
|
||||
}
|
||||
|
||||
if (le32_to_cpu(notif->start)) {
|
||||
@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
|
||||
*/
|
||||
iwl_mvm_te_check_disconnect(mvm, vif,
|
||||
"No beacon heard and the session protection is over already...");
|
||||
spin_lock_bh(&mvm->time_event_lock);
|
||||
iwl_mvm_te_clear_data(mvm, te_data);
|
||||
spin_unlock_bh(&mvm->time_event_lock);
|
||||
}
|
||||
|
||||
goto out_unlock;
|
||||
|
@ -981,6 +981,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
|
||||
IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
|
||||
|
@@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
rxmcs == DESC92C_RATE11M)

struct phy_status_rpt {
u8 padding[2];
u8 ch_corr[2];
u8 cck_sig_qual_ofdm_pwdb_all;
u8 cck_agc_rpt_ofdm_cfosho_a;

@@ -6274,7 +6274,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_IBSS_RSN;
WIPHY_FLAG_IBSS_RSN;

wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
const struct firmware *fw;
struct sk_buff *skb;
unsigned long len;
u8 max_size, payload_size;
int max_size, payload_size;
int rc = 0;

if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)

while (len) {

payload_size = min_t(unsigned long, (unsigned long) max_size,
len);
payload_size = min_t(unsigned long, max_size, len);

skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
GFP_KERNEL);
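The fdp change above widens max_size from u8 to int because the NCI helper it is assigned from returns a negative errno on failure; squeezing that into a u8 turns the error into a large, bogus payload size, so the subsequent error check can never fire. A minimal userspace sketch of the failure mode (get_max_payload() is a hypothetical stand-in for the NCI helper):

#include <stdio.h>
#include <stdint.h>

static int get_max_payload(void)
{
	return -22;	/* an errno-style failure, e.g. -EINVAL */
}

int main(void)
{
	uint8_t narrow = get_max_payload();	/* -22 wraps to 234 */
	int wide = get_max_payload();

	printf("stored in u8:  %u (error silently lost)\n", narrow);
	printf("stored in int: %d (error still detectable)\n", wide);
	return 0;
}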
@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call)
|
||||
call->buffer = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Abort a service call from within an action function.
|
||||
*/
|
||||
static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
|
||||
const char *why)
|
||||
{
|
||||
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
|
||||
abort_code, error, why);
|
||||
afs_set_call_complete(call, error, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* The server supplied a list of callbacks that it wanted to break.
|
||||
*/
|
||||
@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
|
||||
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
|
||||
afs_send_empty_reply(call);
|
||||
else
|
||||
rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
|
||||
1, 1, "K-1");
|
||||
afs_abort_service_call(call, 1, 1, "K-1");
|
||||
|
||||
afs_put_call(call);
|
||||
_leave("");
|
||||
|
@ -154,7 +154,7 @@ struct afs_call {
|
||||
};
|
||||
unsigned char unmarshall; /* unmarshalling phase */
|
||||
unsigned char addr_ix; /* Address in ->alist */
|
||||
bool incoming; /* T if incoming call */
|
||||
bool drop_ref; /* T if need to drop ref for incoming call */
|
||||
bool send_pages; /* T if data from mapping should be sent */
|
||||
bool need_attention; /* T if RxRPC poked us */
|
||||
bool async; /* T if asynchronous */
|
||||
@ -1209,8 +1209,16 @@ static inline void afs_set_call_complete(struct afs_call *call,
|
||||
ok = true;
|
||||
}
|
||||
spin_unlock_bh(&call->state_lock);
|
||||
if (ok)
|
||||
if (ok) {
|
||||
trace_afs_call_done(call);
|
||||
|
||||
/* Asynchronous calls have two refs to release - one from the alloc and
|
||||
* one queued with the work item - and we can't just deallocate the
|
||||
* call because the work item may be queued again.
|
||||
*/
|
||||
if (call->drop_ref)
|
||||
afs_put_call(call);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls;
|
||||
|
||||
static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
|
||||
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
|
||||
static void afs_delete_async_call(struct work_struct *);
|
||||
 static void afs_process_async_call(struct work_struct *);
 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call)
 	int n = atomic_dec_return(&call->usage);
 	int o = atomic_read(&net->nr_outstanding_calls);
 
-	trace_afs_call(call, afs_call_trace_put, n + 1, o,
+	trace_afs_call(call, afs_call_trace_put, n, o,
 		       __builtin_return_address(0));
 
 	ASSERTCMP(n, >=, 0);
@@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 	/* If the call is going to be asynchronous, we need an extra ref for
 	 * the call to hold itself so the caller need not hang on to its ref.
 	 */
-	if (call->async)
+	if (call->async) {
 		afs_get_call(call, afs_call_trace_get);
+		call->drop_ref = true;
+	}
 
 	/* create a call */
 	rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
@@ -413,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 					  afs_wake_up_async_call :
 					  afs_wake_up_call_waiter),
 					 call->upgrade,
-					 call->intr,
+					 (call->intr ? RXRPC_PREINTERRUPTIBLE :
+					  RXRPC_UNINTERRUPTIBLE),
 					 call->debug_id);
 	if (IS_ERR(rxcall)) {
 		ret = PTR_ERR(rxcall);
@@ -584,8 +586,6 @@ static void afs_deliver_to_call(struct afs_call *call)
 done:
 	if (call->type->done)
 		call->type->done(call);
-	if (state == AFS_CALL_COMPLETE && call->incoming)
-		afs_put_call(call);
 out:
 	_leave("");
 	return;
@@ -604,11 +604,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 long afs_wait_for_call_to_complete(struct afs_call *call,
 				   struct afs_addr_cursor *ac)
 {
-	signed long rtt2, timeout;
 	long ret;
-	bool stalled = false;
-	u64 rtt;
-	u32 life, last_life;
 	bool rxrpc_complete = false;
 
 	DECLARE_WAITQUEUE(myself, current);
@@ -619,14 +615,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
 	if (ret < 0)
 		goto out;
 
-	rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-	rtt2 = nsecs_to_jiffies64(rtt) * 2;
-	if (rtt2 < 2)
-		rtt2 = 2;
-
-	timeout = rtt2;
-	rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
-
 	add_wait_queue(&call->waitq, &myself);
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -637,37 +625,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
 			call->need_attention = false;
 			__set_current_state(TASK_RUNNING);
 			afs_deliver_to_call(call);
-			timeout = rtt2;
 			continue;
 		}
 
 		if (afs_check_call_state(call, AFS_CALL_COMPLETE))
 			break;
 
-		if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+		if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
 			/* rxrpc terminated the call. */
 			rxrpc_complete = true;
 			break;
 		}
 
-		if (call->intr && timeout == 0 &&
-		    life == last_life && signal_pending(current)) {
-			if (stalled)
-				break;
-			__set_current_state(TASK_RUNNING);
-			rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
-			timeout = rtt2;
-			stalled = true;
-			continue;
-		}
-
-		if (life != last_life) {
-			timeout = rtt2;
-			last_life = life;
-			stalled = false;
-		}
-
-		timeout = schedule_timeout(timeout);
+		schedule();
 	}
 
 	remove_wait_queue(&call->waitq, &myself);
@@ -735,7 +705,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 
 	u = atomic_fetch_add_unless(&call->usage, 1, 0);
 	if (u != 0) {
-		trace_afs_call(call, afs_call_trace_wake, u,
+		trace_afs_call(call, afs_call_trace_wake, u + 1,
 			       atomic_read(&call->net->nr_outstanding_calls),
 			       __builtin_return_address(0));
 
@@ -744,21 +714,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 	}
 }
 
-/*
- * Delete an asynchronous call.  The work item carries a ref to the call struct
- * that we need to release.
- */
-static void afs_delete_async_call(struct work_struct *work)
-{
-	struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-	_enter("");
-
-	afs_put_call(call);
-
-	_leave("");
-}
-
 /*
  * Perform I/O processing on an asynchronous call.  The work item carries a ref
  * to the call struct that we either need to release or to pass on.
@@ -774,16 +729,6 @@ static void afs_process_async_call(struct work_struct *work)
 		afs_deliver_to_call(call);
 	}
 
-	if (call->state == AFS_CALL_COMPLETE) {
-		/* We have two refs to release - one from the alloc and one
-		 * queued with the work item - and we can't just deallocate the
-		 * call because the work item may be queued again.
-		 */
-		call->async_work.func = afs_delete_async_call;
-		if (!queue_work(afs_async_calls, &call->async_work))
-			afs_put_call(call);
-	}
-
 	afs_put_call(call);
 	_leave("");
 }
@@ -810,6 +755,7 @@ void afs_charge_preallocation(struct work_struct *work)
 		if (!call)
 			break;
 
+		call->drop_ref = true;
 		call->async = true;
 		call->state = AFS_CALL_SV_AWAIT_OP_ID;
 		init_waitqueue_head(&call->waitq);
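Taken together, the afs hunks delete the caller-side stall detection: interruptibility is now policed inside rxrpc (note the RXRPC_PREINTERRUPTIBLE/RXRPC_UNINTERRUPTIBLE selection above), so the waiter only sleeps and re-checks. A condensed paraphrase of the patched loop, for readability only (error handling and wait-queue setup omitted; not a literal copy of the source):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (call->need_attention) {
			call->need_attention = false;
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);
			continue;
		}
		if (afs_check_call_state(call, AFS_CALL_COMPLETE))
			break;
		if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall))
			break;		/* rxrpc terminated the call */
		schedule();		/* plain sleep; the RTT-based schedule_timeout() is gone */
	}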
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
 
 int dsa_8021q_rx_source_port(u16 vid);
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
-
 #else
 
 int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
 	return 0;
 }
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
-	return NULL;
-}
-
 #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
 
 #endif /* _NET_DSA_8021Q_H */
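With dsa_8021q_remove_header() gone, taggers are expected to strip the tag with the generic __skb_vlan_pop() and recover the port/switch from the VID helpers that remain. A hedged sketch of a receive hook under that assumption; my_8021q_rcv() is hypothetical and the real conversion lives in the sja1105 tagger:

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>
	#include <linux/dsa/8021q.h>

	static struct sk_buff *my_8021q_rcv(struct sk_buff *skb)
	{
		u16 tci;
		int source_port, switch_id;

		/* Generic VLAN pop replaces the removed dsa_8021q_remove_header(). */
		if (__skb_vlan_pop(skb, &tci))
			return NULL;

		source_port = dsa_8021q_rx_source_port(tci & VLAN_VID_MASK);
		switch_id = dsa_8021q_rx_switch_id(tci & VLAN_VID_MASK);
		pr_debug("tag_8021q: switch %d port %d\n", switch_id, source_port);
		return skb;
	}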
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -115,6 +115,19 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
 {
 	u64 __cookie = cookie;
 
+	if (!extack)
+		return;
 	memcpy(extack->cookie, &__cookie, sizeof(__cookie));
 	extack->cookie_len = sizeof(__cookie);
 }
+
+static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
+					    u32 cookie)
+{
+	u32 __cookie = cookie;
+
+	if (!extack)
+		return;
+	memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+	extack->cookie_len = sizeof(__cookie);
+}
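The u32 variant mirrors the existing u64 helper, and both now return quietly when no extack was supplied, so callers need not guard the call themselves. A hedged usage sketch; the generic-netlink handler and the cookie value are hypothetical:

	#include <net/genetlink.h>

	static int my_doit(struct sk_buff *skb, struct genl_info *info)
	{
		u32 obj_id = 42;	/* hypothetical ID for the client to echo back */

		/* Safe even if the request carried no extended-ACK block. */
		nl_set_extack_cookie_u32(info->extack, obj_id);
		return 0;
	}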
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *	@tc_skip_classify: do not classify packet. set by IFB device
  *	@tc_at_ingress: used within tc_classify to distinguish in/egress
- *	@tc_redirected: packet was redirected by a tc action
- *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *	@redirected: packet was redirected by packet classifier
+ *	@from_ingress: packet was redirected from the ingress path
  *	@peeked: this packet has been seen already, so stats have been
  *		done for it, don't do them again
  *	@nf_trace: netfilter packet trace flag
@@ -848,8 +848,10 @@ struct sk_buff {
 #ifdef CONFIG_NET_CLS_ACT
 	__u8			tc_skip_classify:1;
 	__u8			tc_at_ingress:1;
-	__u8			tc_redirected:1;
-	__u8			tc_from_ingress:1;
 #endif
+#ifdef CONFIG_NET_REDIRECT
+	__u8			redirected:1;
+	__u8			from_ingress:1;
+#endif
 #ifdef CONFIG_TLS_DEVICE
 	__u8			decrypted:1;
@@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	return skb->redirected;
+#else
+	return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 1;
+	skb->from_ingress = from_ingress;
+	if (skb->from_ingress)
+		skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 0;
+#endif
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
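These helpers move the redirect mark out of the tc-only bits: they compile to no-ops unless CONFIG_NET_REDIRECT is set, and they fold in the timestamp clearing needed when a packet is bounced from the ingress path back toward egress. A hedged sketch of the producer side, loosely modelled on what a mirred-style action does (my_redirect() is hypothetical):

	static void my_redirect(struct sk_buff *skb, struct net_device *to)
	{
		/* Mark the skb; for ingress redirects this also zeroes skb->tstamp. */
		skb_set_redirected(skb, skb_at_tc_ingress(skb));
		skb->dev = to;
		dev_queue_xmit(skb);
	}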
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -16,6 +16,12 @@ struct sock;
 struct socket;
 struct rxrpc_call;
 
+enum rxrpc_interruptibility {
+	RXRPC_INTERRUPTIBLE,	/* Call is interruptible */
+	RXRPC_PREINTERRUPTIBLE,	/* Call can be cancelled whilst waiting for a slot */
+	RXRPC_UNINTERRUPTIBLE,	/* Call should not be interruptible at all */
+};
+
 /*
  * Debug ID counter for tracing.
  */
@@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
 					   gfp_t,
 					   rxrpc_notify_rx_t,
 					   bool,
-					   bool,
+					   enum rxrpc_interruptibility,
 					   unsigned int);
 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
 			   struct msghdr *, size_t,
@@ -58,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
 			       rxrpc_user_attach_call_t, unsigned long, gfp_t,
 			       unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
-			     u32 *);
-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
 				 ktime_t *);
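Kernel users now state up front how a call may be interrupted instead of passing a bare bool, and liveness has become a plain predicate. A hedged sketch of starting a call under the new signature; every name below (my_begin, my_notify_rx, the ID and length choices) is hypothetical:

	#include <net/af_rxrpc.h>

	static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
				 unsigned long user_call_ID);

	static long my_begin(struct socket *sock, struct sockaddr_rxrpc *srx,
			     struct key *key, bool intr, unsigned int debug_id)
	{
		struct rxrpc_call *call;

		call = rxrpc_kernel_begin_call(sock, srx, key,
					       0,		/* user_call_ID */
					       -1,		/* tx_total_len unknown */
					       GFP_KERNEL, my_notify_rx,
					       false,		/* no service upgrade */
					       intr ? RXRPC_PREINTERRUPTIBLE :
						      RXRPC_UNINTERRUPTIBLE,
					       debug_id);
		if (IS_ERR(call))
			return PTR_ERR(call);

		/* The two-step life check plus rxrpc_kernel_probe_life() is gone: */
		if (!rxrpc_kernel_check_life(sock, call))
			return -ECONNRESET;	/* rxrpc already ended the call */
		return 0;
	}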
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
 
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return skb->tc_redirected;
-#else
-	return false;
-#endif
-}
-
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
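The removed accessors have direct replacements in skbuff.h, so call sites outside tc (ifb most prominently) stop depending on CONFIG_NET_CLS_ACT. A hedged sketch of a consumer migrating; my_ifb_xmit() is hypothetical and only gestures at what the real ifb path does:

	static netdev_tx_t my_ifb_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* was: skb_is_tc_redirected(skb) */
		if (!skb_is_redirected(skb)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* was: skb_reset_tc(skb) */
		skb_reset_redirect(skb);
		netif_rx(skb);		/* hand the packet back to the stack */
		return NETDEV_TX_OK;
	}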
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -233,7 +233,7 @@ enum afs_cb_break_reason {
 	EM(afs_call_trace_get,			"GET  ") \
 	EM(afs_call_trace_put,			"PUT  ") \
 	EM(afs_call_trace_wake,			"WAKE ") \
-	E_(afs_call_trace_work,			"WORK ")
+	E_(afs_call_trace_work,			"QUEUE")
 
 #define afs_server_traces \
 	EM(afs_server_trace_alloc,		"ALLOC ") \
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -482,13 +482,21 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 	prev_state = cmpxchg(&st_map->kvalue.state,
 			     BPF_STRUCT_OPS_STATE_INUSE,
 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
-	if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
+	switch (prev_state) {
+	case BPF_STRUCT_OPS_STATE_INUSE:
 		st_map->st_ops->unreg(&st_map->kvalue.data);
 		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
 			bpf_map_put(map);
+		return 0;
+	case BPF_STRUCT_OPS_STATE_TOBEFREE:
+		return -EINPROGRESS;
+	case BPF_STRUCT_OPS_STATE_INIT:
+		return -ENOENT;
+	default:
+		WARN_ON_ONCE(1);
+		/* Should never happen.  Treat it as not found. */
+		return -ENOENT;
 	}
-
-	return 0;
 }
 
 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
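The switch gives userspace distinct answers for the three states: a successful delete unregisters the ops and returns 0, a delete racing an earlier one reports EINPROGRESS, and deleting a never-attached element reports ENOENT. A hedged libbpf-side sketch (map setup elided; delete_twice() is hypothetical):

	#include <bpf/bpf.h>
	#include <errno.h>
	#include <stdio.h>

	static void delete_twice(int map_fd)
	{
		__u32 zero = 0;

		if (bpf_map_delete_elem(map_fd, &zero))
			perror("first delete");		/* INUSE: expected to succeed */

		if (bpf_map_delete_elem(map_fd, &zero))
			perror("second delete");	/* now EINPROGRESS or ENOENT */
	}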
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -2418,7 +2418,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
 
 	struct_size = struct_type->size;
 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
-	if (struct_size - bytes_offset < sizeof(int)) {
+	if (struct_size - bytes_offset < member_type->size) {
 		btf_verifier_log_member(env, struct_type, member,
 					"Member exceeds struct_size");
 		return -EINVAL;
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -227,6 +227,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
 	for (i = 0; i < NR; i++)
 		bpf_prog_array_free(arrays[i]);
 
+	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+		cgroup_bpf_put(p);
+
 	percpu_ref_exit(&cgrp->bpf.refcnt);
 
 	return -ENOMEM;
@@ -302,8 +305,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
 	struct list_head *progs = &cgrp->bpf.progs[type];
 	struct bpf_prog *old_prog = NULL;
-	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
-		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
+	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
 	struct bpf_prog_list *pl, *replace_pl = NULL;
 	enum bpf_cgroup_storage_type stype;
 	int err;
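Two independent fixes here: the inherit error path now drops the parent refs it took, and the attach path zero-initializes both storage arrays. The latter matters because the shared error path frees every slot; this assumes, as the fix itself appears to, that bpf_cgroup_storage_free() treats NULL as a no-op. A minimal hypothetical illustration:

	static int my_attach_error_path(void)
	{
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
		enum bpf_cgroup_storage_type stype;

		/* ... allocation can fail before every slot is filled in ... */

		for_each_cgroup_storage_type(stype)
			bpf_cgroup_storage_free(storage[stype]);	/* NULL slots: no-op */
		return -ENOMEM;
	}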
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1510,6 +1510,11 @@ static int map_freeze(const union bpf_attr *attr)
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
+		fdput(f);
+		return -ENOTSUPP;
+	}
+
 	mutex_lock(&map->freeze_mutex);
 
 	if (map->writecnt) {