Networking fixes for 5.16-rc2, including fixes from bpf, mac80211.

Current release - regressions:

 - devlink: don't throw an error if flash notification sent before
   devlink visible

 - page_pool: Revert "page_pool: disable dma mapping support...",
   turns out there are active arches who need it

Current release - new code bugs:

 - amt: cancel delayed_work synchronously in amt_fini()

Previous releases - regressions:

 - xsk: fix crash on double free in buffer pool

 - bpf: fix inner map state pruning regression causing program
   rejections

 - mac80211: drop check for DONT_REORDER in __ieee80211_select_queue,
   preventing mis-selecting the best effort queue

 - mac80211: do not access the IV when it was stripped

 - mac80211: fix radiotap header generation, off-by-one

 - nl80211: fix getting radio statistics in survey dump

 - e100: fix device suspend/resume

Previous releases - always broken:

 - tcp: fix uninitialized access in skb frags array for Rx 0cp

 - bpf: fix toctou on read-only map's constant scalar tracking

 - bpf: forbid bpf_ktime_get_coarse_ns and bpf_timer_* in tracing progs

 - tipc: only accept encrypted MSG_CRYPTO msgs

 - smc: transfer remaining wait queue entries during fallback,
   fix missing wake ups

 - udp: validate checksum in udp_read_sock() (when sockmap is used)

 - sched: act_mirred: drop dst for the direction from egress to ingress

 - virtio_net_hdr_to_skb: count transport header in UFO, prevent
   allowing bad skbs into the stack

 - nfc: reorder the logic in nfc_{un,}register_device, fix unregister

 - ipsec: check return value of ipv6_skip_exthdr

 - usb: r8152: add MAC passthrough support for more Lenovo Docks

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmGWf08ACgkQMUZtbf5S
Irt+lxAAj8FAoLoSmQKUK3LttLLh0ZQQXu8Riey+wrP8Z9Yp8xWXIaVRF1c0vCE6
clbrF+mLfk6Wvv/RzOgwyBMHvK+djr/oVDNSmjlRvss4MLDfOQZhUV8V4XpvF4Up
hI7wyKfHtd7niosNqil6wklJFpLU8WyIAWrPSIPE6JlPkJmcm3GUGsliwEPwdLY1
yl7z4zsxigjA+hKxYqNQX6tixF3xnbDUbAnWshrSPL89melwz4GMao45qmcxJEVr
EipPhKifk0hT067jG08FMXcKBFKt6rKk7SVNo4mtq8Tl6HleJBj8fdaJAjSdFahB
+rYJ0sDZwGoDL5CxZ5mD3fM1cDgh4WFEM0z//0b/bZhoPDRKEpLr9LPuv+N6+/rA
8D98EHsvyNjlFgdyd8celMstiGtBn4YLEoLNYYh9Qibgm0XsCuv0yox7g0AOLMmQ
QiBmh2EnaXNPQ8PRZNMK3VH5ol2KoYWL6yrpJYV+wOWVLfezwlSsjkPSfW5pF9FG
hU0iQBp/YTCdCadR9YLj8qfDWDUAkCN7WpqIu9EA9FXJcYjJVaix0MA/tAVlzXyR
xlB7cU6O5NABcs/+04zPkKLwKbVYNMqgvKE+FVDVm+BKxo0UMxcmz/Np/ZYxfhkF
bwKplaiPb2H4D6t0sdxqaeYirPwt1BcleLilae6vHG1jO90H9Vw=
=tlqV
-----END PGP SIGNATURE-----

Merge tag 'net-5.16-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bpf, mac80211." (fix list as above)

* tag 'net-5.16-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (96 commits)
  ptp: ocp: Fix a couple NULL vs IS_ERR() checks
  net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
  net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
  ipv6: check return value of ipv6_skip_exthdr
  e100: fix device suspend/resume
  devlink: Don't throw an error if flash notification sent before devlink visible
  page_pool: Revert "page_pool: disable dma mapping support..."
  ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
  octeontx2-af: debugfs: don't corrupt user memory
  NFC: add NCI_UNREG flag to eliminate the race
  NFC: reorder the logic in nfc_{un,}register_device
  NFC: reorganize the functions in nci_request
  tipc: check for null after calling kmemdup
  i40e: Fix display error code in dmesg
  i40e: Fix creation of first queue by omitting it if is not power of two
  i40e: Fix warning message and call stack during rmmod i40e driver
  i40e: Fix ping is lost after configuring ADq on VF
  i40e: Fix changing previously set num_queue_pairs for PFs
  i40e: Fix NULL ptr dereference on VSI filter sync
  i40e: Fix correct max_pkt_size on VF RX queue
  ...
commit 8d0112ac6f
@@ -3733,7 +3733,7 @@ F: drivers/scsi/bnx2i/
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:	Ariel Elior <aelior@marvell.com>
-M:	Sudarsana Kalluru <skalluru@marvell.com>
 M:	GR-everest-linux-l2@marvell.com
+M:	Manish Chopra <manishc@marvell.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -15593,7 +15593,7 @@ F: drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:	Ariel Elior <aelior@marvell.com>
 M:	GR-everest-linux-l2@marvell.com
+M:	Manish Chopra <manishc@marvell.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qed/
@@ -3286,7 +3286,7 @@ static void __exit amt_fini(void)
 {
 	rtnl_link_unregister(&amt_link_ops);
 	unregister_netdevice_notifier(&amt_notifier_block);
-	cancel_delayed_work(&source_gc_wq);
+	cancel_delayed_work_sync(&source_gc_wq);
 	__amt_source_gc_work();
 	destroy_workqueue(amt_wq);
 }
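For context, a minimal sketch (not from this patch; module and symbol names are made up) of why the teardown order above matters: cancel_delayed_work() only dequeues a pending item, while cancel_delayed_work_sync() also waits for a handler that is already executing, so the workqueue can be destroyed safely afterwards.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static void demo_gc(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_gc_work, demo_gc);

static void demo_gc(struct work_struct *work)
{
	/* ... periodic garbage collection; re-arms itself ... */
	queue_delayed_work(demo_wq, &demo_gc_work, HZ);
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_delayed_work(demo_wq, &demo_gc_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * cancel_delayed_work() could return while demo_gc() is still
	 * running, so destroy_workqueue() would race with it; the _sync
	 * variant waits for a running handler to finish first.
	 */
	cancel_delayed_work_sync(&demo_gc_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");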
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 			goto err_exit;
 
 		if (fw.len == 0xFFFFU) {
+			if (sw.len > sizeof(self->rpc)) {
+				printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+				err = -EINVAL;
+				goto err_exit;
+			}
 			err = hw_atl_utils_fw_rpc_call(self, sw.len);
 			if (err < 0)
 				goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 
 	if (rpc) {
 		if (fw.len) {
+			if (fw.len > sizeof(self->rpc)) {
+				printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+				err = -EINVAL;
+				goto err_exit;
+			}
 			err =
 				hw_atl_utils_fw_downld_dwords(self,
 							      self->rpc_addr,
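The pattern added above — clamp a device-reported length against the destination buffer before copying — in a hedged standalone sketch (names are illustrative, not from the driver):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define RPC_BUF_SIZE 1024

struct rpc_state {
	uint8_t rpc[RPC_BUF_SIZE];
};

/*
 * len comes from device/firmware and must be treated as untrusted:
 * copying without the bound check would overflow state->rpc.
 */
static int rpc_copy_in(struct rpc_state *state, const uint8_t *src, size_t len)
{
	if (len > sizeof(state->rpc))
		return -EINVAL;
	memcpy(state->rpc, src, len);
	return 0;
}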
@@ -127,9 +127,9 @@ struct ax88796c_device {
 		#define AX_PRIV_FLAGS_MASK	(AX_CAP_COMP)
 
 	unsigned long		flags;
-	#define EVENT_INTR		BIT(0)
-	#define EVENT_TX		BIT(1)
-	#define EVENT_SET_MULTI		BIT(2)
+	#define EVENT_INTR		0
+	#define EVENT_TX		1
+	#define EVENT_SET_MULTI		2
 
 };
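The change above matters because the kernel's set_bit()/test_bit() take a bit number, not a mask; defining EVENT_TX as BIT(1) (= 2) and then calling set_bit(EVENT_TX, &flags) actually operates on bit 2. A hedged userspace sketch of the semantics:

#include <assert.h>

#define BIT(n) (1UL << (n))

/* mimics the kernel API contract: first argument is a bit number */
static void set_bit_nr(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

int main(void)
{
	unsigned long flags = 0;

	/* Wrong: passing the mask BIT(1) == 2 sets bit 2, not bit 1. */
	set_bit_nr(BIT(1), &flags);
	assert(flags == BIT(2));

	/* Right: pass the bit number itself. */
	flags = 0;
	set_bit_nr(1, &flags);
	assert(flags == BIT(1));
	return 0;
}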
@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
 {
 	int i, rc;
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
-	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+	struct ilt_client_info *ilt_cli;
 
 	if (!ilt || !ilt->lines)
 		return -1;
 
+	ilt_cli = &ilt->clients[cli_num];
+
 	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
 		return 0;
 
@@ -2258,6 +2258,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
 	}
 }
 
+/* Must hold rtnl_lock */
+static inline bool bnxt_sriov_cfg(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg);
+#else
+	return false;
+#endif
+}
+
 extern const u16 bnxt_lhint_arr[];
 
 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
 		NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
 		break;
 	default:
-		netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+		netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
 		NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
 		break;
 	}
@@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
 
 	switch (action) {
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
-		if (BNXT_PF(bp) && bp->pf.active_vfs) {
+		rtnl_lock();
+		if (bnxt_sriov_cfg(bp)) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "reload is unsupported when VFs are allocated");
+					   "reload is unsupported while VFs are allocated or being configured");
+			rtnl_unlock();
 			return -EOPNOTSUPP;
 		}
-		rtnl_lock();
 		if (bp->dev->reg_state == NETREG_UNREGISTERED) {
 			rtnl_unlock();
 			return -ENODEV;
@@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
 	struct flow_cls_offload *flower = type_data;
 	struct bnxt *bp = priv->bp;
 
-	if (flower->common.chain_index)
+	if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
 		return -EOPNOTSUPP;
 
 	switch (type) {
@@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
 	lp->ibn = 3;
 	lp->active = *p++;
 	if (MOTO_SROM_BUG) lp->active = 0;
+	/* if (MOTO_SROM_BUG) statement indicates lp->active could
+	 * be 8 (i.e. the size of array lp->phy) */
+	if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+		return -EINVAL;
 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
 	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
@@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev)
 	}
     if ((j == limit) && (i < DE4X5_MAX_MII)) {
 	for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-	lp->phy[k].addr = i;
-	lp->phy[k].id = id;
-	lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register */
-	lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies */
-	lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex */
-	lp->mii_cnt++;
-	lp->active++;
-	printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-	j = de4x5_debug;
-	de4x5_debug |= DEBUG_MII;
-	de4x5_dbg_mii(dev, k);
-	de4x5_debug = j;
-	printk("\n");
+	if (k < DE4X5_MAX_PHY) {
+		lp->phy[k].addr = i;
+		lp->phy[k].id = id;
+		lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register */
+		lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies */
+		lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex */
+		lp->mii_cnt++;
+		lp->active++;
+		printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+		j = de4x5_debug;
+		de4x5_debug |= DEBUG_MII;
+		de4x5_dbg_mii(dev, k);
+		de4x5_debug = j;
+		printk("\n");
+	} else {
+		goto purgatory;
+	}
     }
   }
   purgatory:
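A hedged standalone illustration of the ARRAY_SIZE-style guard used in both de4x5 hunks — validating an index parsed from untrusted SROM data before it addresses a fixed array (names are made up):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phy_info { int addr; };

static struct phy_info phys[8];

static int select_phy(unsigned int idx)
{
	/* idx originates in device data; 8 would be one past the end */
	if (idx >= ARRAY_SIZE(phys))
		return -EINVAL;
	phys[idx].addr = 1;
	return 0;
}

int main(void)
{
	printf("in-range: %d, out-of-range: %d\n", select_phy(7), select_phy(8));
	return 0;
}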
@@ -4550,10 +4550,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 
 	fsl_mc_portal_free(priv->mc_io);
 
-	free_netdev(net_dev);
-
 	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 
+	free_netdev(net_dev);
+
 	return 0;
 }
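The reorder above fixes a use-after-free: the debug message dereferenced net_dev after it had been freed. A minimal sketch of the rule (illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev { char name[16]; };

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	strcpy(d->name, "eth0");

	/* Log while the object is still alive, then free it; the
	 * reversed order would read freed memory, as in the bug above. */
	printf("Removed interface %s\n", d->name);
	free(d);
	return 0;
}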
@@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
 		return;
 
 	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+		   We need check to prevent array overflow */
+		if (port >= DSAF_MAX_PORT_NUM)
+			return;
 		reg_val_1  = 0x1 << port;
 		port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
 		/* there is difference between V1 and V2 in register.*/
@@ -3003,9 +3003,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nic *nic = netdev_priv(netdev);
 
-	netif_device_detach(netdev);
-
 	if (netif_running(netdev))
 		e100_down(nic);
+	netif_device_detach(netdev);
 
 	if ((nic->flags & wol_magic) | e100_asf(nic)) {
 		/* enable reverse auto-negotiation */
@@ -3022,7 +3023,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		*enable_wake = false;
 	}
 
-	pci_clear_master(pdev);
+	pci_disable_device(pdev);
 }
 
 static int __e100_power_off(struct pci_dev *pdev, bool wake)
@@ -3042,8 +3043,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
 
 	__e100_shutdown(to_pci_dev(dev_d), &wake);
 
-	device_wakeup_disable(dev_d);
-
 	return 0;
 }
 
@@ -3051,6 +3050,14 @@ static int __maybe_unused e100_resume(struct device *dev_d)
 {
 	struct net_device *netdev = dev_get_drvdata(dev_d);
 	struct nic *nic = netdev_priv(netdev);
+	int err;
+
+	err = pci_enable_device(to_pci_dev(dev_d));
+	if (err) {
+		netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
+		return err;
+	}
+	pci_set_master(to_pci_dev(dev_d));
 
 	/* disable reverse auto-negotiation */
 	if (nic->phy == phy_82552_v) {
@@ -3062,10 +3069,11 @@ static int __maybe_unused e100_resume(struct device *dev_d)
 			   smartspeed & ~(E100_82552_REV_ANEG));
 	}
 
-	netif_device_attach(netdev);
 	if (netif_running(netdev))
 		e100_up(nic);
+	netif_device_attach(netdev);
 
 	return 0;
 }
@@ -161,6 +161,7 @@ enum i40e_vsi_state_t {
 	__I40E_VSI_OVERFLOW_PROMISC,
 	__I40E_VSI_REINIT_REQUESTED,
 	__I40E_VSI_DOWN_REQUESTED,
+	__I40E_VSI_RELEASING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_VSI_STATE_SIZE__,
 };
@@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
 void i40e_ptp_init(struct i40e_pf *pf);
 void i40e_ptp_stop(struct i40e_pf *pf);
 int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
@@ -1790,6 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 				     bool is_add)
 {
 	struct i40e_pf *pf = vsi->back;
+	u16 num_tc_qps = 0;
 	u16 sections = 0;
 	u8 netdev_tc = 0;
 	u16 numtc = 1;
@@ -1797,13 +1798,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 	u8 offset;
 	u16 qmap;
 	int i;
-	u16 num_tc_qps = 0;
 
 	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
 	offset = 0;
+	/* zero out queue mapping, it will get updated on the end of the function */
+	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
+
+	if (vsi->type == I40E_VSI_MAIN) {
+		/* This code helps add more queue to the VSI if we have
+		 * more cores than RSS can support, the higher cores will
+		 * be served by ATR or other filters. Furthermore, the
+		 * non-zero req_queue_pairs says that user requested a new
+		 * queue count via ethtool's set_channels, so use this
+		 * value for queues distribution across traffic classes
+		 */
+		if (vsi->req_queue_pairs > 0)
+			vsi->num_queue_pairs = vsi->req_queue_pairs;
+		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+			vsi->num_queue_pairs = pf->num_lan_msix;
+	}
 
 	/* Number of queues per enabled TC */
-	num_tc_qps = vsi->alloc_queue_pairs;
+	if (vsi->type == I40E_VSI_MAIN ||
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
+		num_tc_qps = vsi->num_queue_pairs;
+	else
+		num_tc_qps = vsi->alloc_queue_pairs;
 
 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1881,15 +1902,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
 		}
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-
-	/* Set actual Tx/Rx queue pairs */
-	vsi->num_queue_pairs = offset;
-	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
-		if (vsi->req_queue_pairs > 0)
-			vsi->num_queue_pairs = vsi->req_queue_pairs;
-		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
-			vsi->num_queue_pairs = pf->num_lan_msix;
-	}
+	/* Do not change previously set num_queue_pairs for PFs and VFs*/
+	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
+	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
+		vsi->num_queue_pairs = offset;
 
 	/* Scheduler section valid can only be set for ADD VSI */
 	if (is_add) {
@@ -2623,7 +2640,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] &&
-		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
 			int ret = i40e_sync_vsi_filters(pf->vsi[v]);
 
 			if (ret) {
@@ -5426,6 +5444,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
 			sizeof(vsi->info.tc_mapping));
 }
 
+/**
+ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
+ * @vsi: the VSI being reconfigured
+ * @vsi_offset: offset from main VF VSI
+ */
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+{
+	struct i40e_vsi_context ctxt = {};
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	int ret;
+
+	if (!vsi)
+		return I40E_ERR_PARAM;
+	pf = vsi->back;
+	hw = &pf->hw;
+
+	ctxt.seid = vsi->seid;
+	ctxt.pf_num = hw->pf_id;
+	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
+	ctxt.uplink_seid = vsi->uplink_seid;
+	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+	ctxt.info = vsi->info;
+
+	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
+				 false);
+	if (vsi->reconfig_rss) {
+		vsi->rss_size = min_t(int, pf->alloc_rss_size,
+				      vsi->num_queue_pairs);
+		ret = i40e_vsi_config_rss(vsi);
+		if (ret) {
+			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
+			return ret;
+		}
+		vsi->reconfig_rss = false;
+	}
+
+	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (ret) {
+		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+			 i40e_stat_str(hw, ret),
+			 i40e_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+	}
+	/* update the local VSI info with updated queue map */
+	i40e_vsi_update_queue_map(vsi, &ctxt);
+	vsi->info.valid_sections = 0;
+
+	return ret;
+}
+
 /**
  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
  * @vsi: VSI to be configured
@@ -5716,24 +5786,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
 	INIT_LIST_HEAD(&vsi->ch_list);
 }
 
-/**
- * i40e_is_any_channel - channel exist or not
- * @vsi: ptr to VSI to which channels are associated with
- *
- * Returns true or false if channel(s) exist for associated VSI or not
- **/
-static bool i40e_is_any_channel(struct i40e_vsi *vsi)
-{
-	struct i40e_channel *ch, *ch_tmp;
-
-	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
-		if (ch->initialized)
-			return true;
-	}
-
-	return false;
-}
-
 /**
  * i40e_get_max_queues_for_channel
  * @vsi: ptr to VSI to which channels are associated with
@@ -6240,26 +6292,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
 	/* By default we are in VEPA mode, if this is the first VF/VMDq
 	 * VSI to be added switch to VEB mode.
 	 */
-	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
-	    (!i40e_is_any_channel(vsi))) {
-		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
-			dev_dbg(&pf->pdev->dev,
-				"Failed to create channel. Override queues (%u) not power of 2\n",
-				vsi->tc_config.tc_info[0].qcount);
-			return -EINVAL;
-		}
-
-		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
-			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 
-			if (vsi->type == I40E_VSI_MAIN) {
-				if (pf->flags & I40E_FLAG_TC_MQPRIO)
-					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
-						      true);
-				else
-					i40e_do_reset_safe(pf,
-							   I40E_PF_RESET_FLAG);
-			}
+		if (vsi->type == I40E_VSI_MAIN) {
+			if (pf->flags & I40E_FLAG_TC_MQPRIO)
+				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+			else
+				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+		}
 		/* now onwards for main VSI, number of queues will be value
 		 * of TC0's queue count
@@ -7912,12 +7953,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
 				vsi->seid);
 			need_reset = true;
 			goto exit;
-		} else {
-			dev_info(&vsi->back->pdev->dev,
-				 "Setup channel (id:%u) utilizing num_queues %d\n",
-				 vsi->seid, vsi->tc_config.tc_info[0].qcount);
+		} else if (enabled_tc &&
+			   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
+			netdev_info(netdev,
+				    "Failed to create channel. Override queues (%u) not power of 2\n",
+				    vsi->tc_config.tc_info[0].qcount);
+			ret = -EINVAL;
+			need_reset = true;
+			goto exit;
 		}
 
+		dev_info(&vsi->back->pdev->dev,
+			 "Setup channel (id:%u) utilizing num_queues %d\n",
+			 vsi->seid, vsi->tc_config.tc_info[0].qcount);
+
 		if (pf->flags & I40E_FLAG_TC_MQPRIO) {
 			if (vsi->mqprio_qopt.max_rate[0]) {
 				u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
@@ -8482,9 +8531,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
 	err = i40e_add_del_cloud_filter(vsi, filter, true);
 
 	if (err) {
-		dev_err(&pf->pdev->dev,
-			"Failed to add cloud filter, err %s\n",
-			i40e_stat_str(&pf->hw, err));
+		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
+			err);
 		goto err;
 	}
 
@@ -13771,7 +13819,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
 		return -ENODEV;
 	}
-
+	set_bit(__I40E_VSI_RELEASING, vsi->state);
 	uplink_seid = vsi->uplink_seid;
 	if (vsi->type != I40E_VSI_SRIOV) {
 		if (vsi->netdev_registered) {
@@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 /***********************misc routines*****************************/
 
 /**
- * i40e_vc_disable_vf
+ * i40e_vc_reset_vf
  * @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset.
+ * @notify_vf: notify vf about reset or not
+ * Reset VF handler.
  **/
-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
 {
 	struct i40e_pf *pf = vf->pf;
 	int i;
 
-	i40e_vc_notify_vf_reset(vf);
+	if (notify_vf)
+		i40e_vc_notify_vf_reset(vf);
 
 	/* We want to ensure that an actual reset occurs initiated after this
 	 * function was called. However, we do not want to wait forever, so
@@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 		usleep_range(10000, 20000);
 	}
 
-	dev_warn(&vf->pf->pdev->dev,
-		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
-		 vf->vf_id);
+	if (notify_vf)
+		dev_warn(&vf->pf->pdev->dev,
+			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
+			 vf->vf_id);
+	else
+		dev_dbg(&vf->pf->pdev->dev,
+			"Failed to initiate reset for VF %d after 200 milliseconds\n",
+			vf->vf_id);
 }
 
 /**
@@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 				    u16 vsi_queue_id,
 				    struct virtchnl_rxq_info *info)
 {
+	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_hmc_obj_rxq rx_ctx;
-	u16 pf_queue_id;
 	int ret = 0;
 
-	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
-
 	/* clear the context structure first */
 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 
@@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 	}
 	rx_ctx.rxmax = info->max_pkt_size;
 
+	/* if port VLAN is configured increase the max packet size */
+	if (vsi->info.pvid)
+		rx_ctx.rxmax += VLAN_HLEN;
+
 	/* enable 32bytes desc always */
 	rx_ctx.dsize = 1;
 
@@ -2105,20 +2114,6 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 	return ret;
 }
 
-/**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
-{
-	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
-		i40e_reset_vf(vf, false);
-}
-
 /**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
@@ -2217,11 +2212,12 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 	struct virtchnl_vsi_queue_config_info *qci =
 	    (struct virtchnl_vsi_queue_config_info *)msg;
 	struct virtchnl_queue_pair_info *qpi;
-	struct i40e_pf *pf = vf->pf;
 	u16 vsi_id, vsi_queue_id = 0;
-	u16 num_qps_all = 0;
+	struct i40e_pf *pf = vf->pf;
 	i40e_status aq_ret = 0;
 	int i, j = 0, idx = 0;
+	struct i40e_vsi *vsi;
+	u16 num_qps_all = 0;
 
 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
 		aq_ret = I40E_ERR_PARAM;
@@ -2310,9 +2306,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
 			qci->num_queue_pairs;
 	} else {
-		for (i = 0; i < vf->num_tc; i++)
-			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
-				vf->ch[i].num_qps;
+		for (i = 0; i < vf->num_tc; i++) {
+			vsi = pf->vsi[vf->ch[i].vsi_idx];
+			vsi->num_queue_pairs = vf->ch[i].num_qps;
+
+			if (i40e_update_adq_vsi_queues(vsi, i)) {
+				aq_ret = I40E_ERR_CONFIG;
+				goto error_param;
+			}
+		}
 	}
 
 error_param:
@@ -2607,8 +2609,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
 	} else {
 		/* successful request */
 		vf->num_req_queues = req_pairs;
-		i40e_vc_notify_vf_reset(vf);
-		i40e_reset_vf(vf, false);
+		i40e_vc_reset_vf(vf, true);
 		return 0;
 	}
 
@@ -3803,8 +3804,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 	vf->num_req_queues = 0;
 
 	/* reset the VF in order to allocate resources */
-	i40e_vc_notify_vf_reset(vf);
-	i40e_reset_vf(vf, false);
+	i40e_vc_reset_vf(vf, true);
 
 	return I40E_SUCCESS;
 
@@ -3844,8 +3844,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
 	}
 
 	/* reset the VF in order to allocate resources */
-	i40e_vc_notify_vf_reset(vf);
-	i40e_reset_vf(vf, false);
+	i40e_vc_reset_vf(vf, true);
 
 	return I40E_SUCCESS;
 
@@ -3907,7 +3906,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case VIRTCHNL_OP_RESET_VF:
-		i40e_vc_reset_vf_msg(vf);
+		i40e_vc_reset_vf(vf, false);
 		ret = 0;
 		break;
 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
@@ -4161,7 +4160,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	/* Force the VF interface down so it has to bring up with new MAC
 	 * address
 	 */
-	i40e_vc_disable_vf(vf);
+	i40e_vc_reset_vf(vf, true);
 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
 
 error_param:
@@ -4169,34 +4168,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	return ret;
 }
 
-/**
- * i40e_vsi_has_vlans - True if VSI has configured VLANs
- * @vsi: pointer to the vsi
- *
- * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
- * we have no configured VLANs. Do not call while holding the
- * mac_filter_hash_lock.
- */
-static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
-{
-	bool have_vlans;
-
-	/* If we have a port VLAN, then the VSI cannot have any VLANs
-	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
-	 */
-	if (vsi->info.pvid)
-		return false;
-
-	/* Since we don't have a PVID, we know that if the device is in VLAN
-	 * mode it must be because of a VLAN filter configured on this VSI.
-	 */
-	spin_lock_bh(&vsi->mac_filter_hash_lock);
-	have_vlans = i40e_is_vsi_in_vlan(vsi);
-	spin_unlock_bh(&vsi->mac_filter_hash_lock);
-
-	return have_vlans;
-}
-
 /**
  * i40e_ndo_set_vf_port_vlan
  * @netdev: network interface device structure
@@ -4253,19 +4224,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 		/* duplicate request, so just return success */
 		goto error_pvid;
 
-	if (i40e_vsi_has_vlans(vsi)) {
-		dev_err(&pf->pdev->dev,
-			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
-			vf_id);
-		/* Administrator Error - knock the VF offline until he does
-		 * the right thing by reconfiguring his network correctly
-		 * and then reloading the VF driver.
-		 */
-		i40e_vc_disable_vf(vf);
-		/* During reset the VF got a new VSI, so refresh the pointer. */
-		vsi = pf->vsi[vf->lan_vsi_idx];
-	}
-
+	i40e_vc_reset_vf(vf, true);
+	/* During reset the VF got a new VSI, so refresh a pointer. */
+	vsi = pf->vsi[vf->lan_vsi_idx];
 	/* Locked once because multiple functions below iterate list */
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 
@@ -4641,7 +4602,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
 		goto out;
 
 	vf->trusted = setting;
-	i40e_vc_disable_vf(vf);
+	i40e_vc_reset_vf(vf, true);
 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
 		 vf_id, setting ? "" : "un");
 
@@ -39,6 +39,7 @@
 #include "iavf_txrx.h"
 #include "iavf_fdir.h"
 #include "iavf_adv_rss.h"
+#include <linux/bitmap.h>
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 #define PFX "iavf: "
@@ -1776,6 +1776,7 @@ static int iavf_set_channels(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	u32 num_req = ch->combined_count;
+	int i;
 
 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
 	    adapter->num_tc) {
@@ -1786,7 +1787,7 @@ static int iavf_set_channels(struct net_device *netdev,
 	/* All of these should have already been checked by ethtool before this
 	 * even gets to us, but just to be sure.
 	 */
-	if (num_req > adapter->vsi_res->num_queue_pairs)
+	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
 		return -EINVAL;
 
 	if (num_req == adapter->num_active_queues)
@@ -1798,6 +1799,20 @@ static int iavf_set_channels(struct net_device *netdev,
 	adapter->num_req_queues = num_req;
 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
 	iavf_schedule_reset(adapter);
+
+	/* wait for the reset is done */
+	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
+		msleep(IAVF_RESET_WAIT_MS);
+		if (adapter->flags & IAVF_FLAG_RESET_PENDING)
+			continue;
+		break;
+	}
+	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+		adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+		adapter->num_active_queues = num_req;
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
 
@@ -1844,14 +1859,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
-	if (!indir)
-		return 0;
 	if (key)
-		memcpy(key, adapter->rss_key, adapter->rss_key_size);
+		memcpy(key, adapter->rss_key, adapter->rss_key_size);
 
-	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
-	for (i = 0; i < adapter->rss_lut_size; i++)
-		indir[i] = (u32)adapter->rss_lut[i];
+	if (indir)
+		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
+		for (i = 0; i < adapter->rss_lut_size; i++)
+			indir[i] = (u32)adapter->rss_lut[i];
 
 	return 0;
 }
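The get_rxfh fix above reflects a general contract: either output pointer may be NULL when the caller wants only part of the data, so each must be checked independently. A hedged sketch with illustrative names:

#include <stddef.h>
#include <string.h>

struct rss_state {
	unsigned char key[40];
	unsigned int lut[128];
};

/* Either out-parameter may be NULL; fill only what the caller asked for. */
static void rss_query(const struct rss_state *rss,
		      unsigned char *key, unsigned int *indir)
{
	size_t i;

	if (key)
		memcpy(key, rss->key, sizeof(rss->key));
	if (indir)
		for (i = 0; i < 128; i++)
			indir[i] = rss->lut[i];
}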
@@ -696,6 +696,23 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
+/**
+ * iavf_restore_filters
+ * @adapter: board private structure
+ *
+ * Restore existing non MAC filters when VF netdev comes back up
+ **/
+static void iavf_restore_filters(struct iavf_adapter *adapter)
+{
+	/* re-add all VLAN filters */
+	if (VLAN_ALLOWED(adapter)) {
+		u16 vid;
+
+		for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+			iavf_add_vlan(adapter, vid);
+	}
+}
+
 /**
  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
  * @netdev: network device struct
@@ -709,8 +726,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
 
 	if (!VLAN_ALLOWED(adapter))
 		return -EIO;
+
 	if (iavf_add_vlan(adapter, vid) == NULL)
 		return -ENOMEM;
+
+	set_bit(vid, adapter->vsi.active_vlans);
 	return 0;
 }
 
@@ -725,11 +745,13 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
-	if (VLAN_ALLOWED(adapter)) {
-		iavf_del_vlan(adapter, vid);
-		return 0;
-	}
-	return -EIO;
+	if (!VLAN_ALLOWED(adapter))
+		return -EIO;
+
+	iavf_del_vlan(adapter, vid);
+	clear_bit(vid, adapter->vsi.active_vlans);
+
+	return 0;
 }
 
 /**
@@ -1639,8 +1661,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
 		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
 		return 0;
 	}
-
-	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
+	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
 	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
 		iavf_set_promiscuous(adapter, 0);
 		return 0;
@@ -2123,8 +2144,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 
 	iavf_free_misc_irq(adapter);
 	iavf_reset_interrupt_capability(adapter);
-	iavf_free_queues(adapter);
 	iavf_free_q_vectors(adapter);
+	iavf_free_queues(adapter);
 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
 	iavf_shutdown_adminq(&adapter->hw);
 	adapter->netdev->flags &= ~IFF_UP;
@@ -2410,7 +2431,7 @@ static void iavf_adminq_task(struct work_struct *work)
 
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
-	if (val == 0xdeadbeef) /* indicates device in reset */
+	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
 		goto freedom;
 	oldval = val;
 	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
@@ -3095,8 +3116,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 		return -ENOMEM;
 
 	while (!mutex_trylock(&adapter->crit_lock)) {
-		if (--count == 0)
-			goto err;
+		if (--count == 0) {
+			kfree(filter);
+			return err;
+		}
 		udelay(1);
 	}
 
@@ -3107,11 +3130,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
 	/* start out with flow type and eth type IPv4 to begin with */
 	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
-	if (err < 0)
+	if (err)
 		goto err;
 
 	err = iavf_handle_tclass(adapter, tc, filter);
-	if (err < 0)
+	if (err)
 		goto err;
 
 	/* add filter to the list */
@@ -3308,6 +3331,9 @@ static int iavf_open(struct net_device *netdev)
 
 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
+	/* Restore VLAN filters that were removed with IFF_DOWN */
+	iavf_restore_filters(adapter);
+
 	iavf_configure(adapter);
 
 	iavf_up_complete(adapter);
@@ -3503,7 +3529,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 
-	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+	if (adapter->vf_res &&
+	    !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
 		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
 			      NETIF_F_HW_VLAN_CTAG_RX |
 			      NETIF_F_HW_VLAN_CTAG_FILTER);
@@ -25,6 +25,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/property.h>
 
 #include <asm/checksum.h>
 
@@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	int i;
	int err;
 
 	ltq_pmu_enable(PMU_PPE);
 
@@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev)
 
 		if (IS_TX(i)) {
 			ltq_dma_alloc_tx(&ch->dma);
-			request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+			err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+			if (err) {
+				netdev_err(dev,
+					   "Unable to get Tx DMA IRQ %d\n",
+					   irq);
+				return err;
+			}
 		} else if (IS_RX(i)) {
 			ltq_dma_alloc_rx(&ch->dma);
 			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev)
 				if (ltq_etop_alloc_skb(ch))
 					return -ENOMEM;
 			ch->dma.desc = 0;
-			request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+			err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+			if (err) {
+				netdev_err(dev,
+					   "Unable to get Rx DMA IRQ %d\n",
+					   irq);
+				return err;
+			}
 		}
 		ch->dma.irq = irq;
 	}
@@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = {
 	},
 };
 
-int __init
+static int __init
 init_ltq_etop(void)
 {
 	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
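Checking request_irq() is the general rule the two hunks above apply: it returns 0 on success or a negative errno, and ignoring that return leaves the driver believing an IRQ is wired up when it is not. A hedged kernel-style sketch (handler and names are illustrative):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* hypothetical handler for illustration */
static irqreturn_t demo_dma_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct net_device *dev, int irq, void *priv)
{
	int err;

	err = request_irq(irq, demo_dma_irq, 0, "demo_tx", priv);
	if (err) {
		/* propagate the errno instead of silently continuing */
		netdev_err(dev, "Unable to get DMA IRQ %d\n", irq);
		return err;
	}
	return 0;
}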
@@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, orion_mdio_match);
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id orion_mdio_acpi_match[] = {
 	{ "MRVL0100", BUS_TYPE_SMI },
 	{ "MRVL0101", BUS_TYPE_XSMI },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+#endif
 
 static struct platform_driver orion_mdio_driver = {
 	.probe = orion_mdio_probe,
@@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 	u64 lmt_addr, val, tbl_base;
 	int pf, vf, num_vfs, hw_vfs;
 	void __iomem *lmt_map_base;
-	int index = 0, off = 0;
-	int bytes_not_copied;
 	int buf_size = 10240;
+	size_t off = 0;
+	int index = 0;
 	char *buf;
+	int ret;
 
 	/* don't allow partial reads */
 	if (*ppos != 0)
@@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 	}
 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
 
-	bytes_not_copied = copy_to_user(buffer, buf, off);
+	ret = min(off, count);
+	if (copy_to_user(buffer, buf, ret))
+		ret = -EFAULT;
 	kfree(buf);
 
 	iounmap(lmt_map_base);
-	if (bytes_not_copied)
-		return -EFAULT;
+	if (ret < 0)
+		return ret;
 
-	*ppos = off;
-	return off;
+	*ppos = ret;
+	return ret;
 }
 
 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
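The debugfs fix above essentially open-codes what simple_read_from_buffer() provides: clamp the copy to the caller's count, return -EFAULT on a failed copy_to_user(), and advance *ppos by what was actually copied. A hedged sketch of a read handler built on that helper (names are illustrative):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static char demo_text[] = "hello from debugfs\n";

static ssize_t demo_read(struct file *filp, char __user *buffer,
			 size_t count, loff_t *ppos)
{
	/* clamps to count, handles -EFAULT and *ppos for us */
	return simple_read_from_buffer(buffer, count, ppos,
				       demo_text, sizeof(demo_text) - 1);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read = demo_read,
};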
@@ -339,6 +339,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
 	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
 	case MLX5_CMD_OP_DEALLOC_SF:
+	case MLX5_CMD_OP_DESTROY_UCTX:
+	case MLX5_CMD_OP_DESTROY_UMEM:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
 	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
 	case MLX5_CMD_OP_CREATE_UCTX:
-	case MLX5_CMD_OP_DESTROY_UCTX:
 	case MLX5_CMD_OP_CREATE_UMEM:
-	case MLX5_CMD_OP_DESTROY_UMEM:
 	case MLX5_CMD_OP_ALLOC_MEMIC:
 	case MLX5_CMD_OP_MODIFY_XRQ:
 	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
@@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
 	MLX5_SET(destroy_cq_in, in, uid, cq->uid);
 	err = mlx5_cmd_exec_in(dev, destroy_cq, in);
+	if (err)
+		return err;
 
 	synchronize_irq(cq->irqn);
-
 	mlx5_cq_put(cq);
 	wait_for_completion(&cq->free);
 
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 
@@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 	if (!mlx5_debugfs_root)
 		return;
 
-	if (cq->dbg)
+	if (cq->dbg) {
 		rem_res_tree(cq->dbg);
+		cq->dbg = NULL;
+	}
 }
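Clearing the pointer after releasing it, as above, turns a potential double free on a later call into a harmless no-op. A hedged standalone sketch:

#include <stdlib.h>

struct res { int dummy; };

struct ctx { struct res *dbg; };

static void ctx_remove_debug(struct ctx *c)
{
	if (c->dbg) {
		free(c->dbg);
		c->dbg = NULL;	/* a second call is now a no-op */
	}
}

int main(void)
{
	struct ctx c = { .dbg = malloc(sizeof(struct res)) };

	ctx_remove_debug(&c);
	ctx_remove_debug(&c);	/* safe: pointer was cleared */
	return 0;
}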
@@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 			struct mlx5_flow_attr *attr,
+			struct mlx5e_tc_mod_hdr_acts *mod_acts,
 			const struct flow_action_entry *act,
 			struct netlink_ext_ack *extack)
 {
+	bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
+	int err;
+
 	if (!priv) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "offload of ct action isn't available");
@@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 	attr->ct_attr.ct_action = act->ct.action;
 	attr->ct_attr.nf_ft = act->ct.flow_table;
 
+	if (!clear_action)
+		goto out;
+
+	err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
+		return err;
+	}
+	attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+out:
 	return 0;
 }
 
@@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
 
 	memcpy(pre_ct_attr, attr, attr_sz);
 
-	err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
-	if (err) {
-		ct_dbg("Failed to set register for ct clear");
-		goto err_set_registers;
-	}
-
 	mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
 					   mod_acts->num_actions,
 					   mod_acts->actions);
 	if (IS_ERR(mod_hdr)) {
 		err = PTR_ERR(mod_hdr);
 		ct_dbg("Failed to add create ct clear mod hdr");
-		goto err_set_registers;
+		goto err_mod_hdr;
 	}
 
 	pre_ct_attr->modify_hdr = mod_hdr;
-	pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
 	rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
 	if (IS_ERR(rule)) {
@@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
 
 err_insert:
 	mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_set_registers:
+err_mod_hdr:
 	netdev_warn(priv->netdev,
 		    "Failed to offload ct clear flow, err %d\n", err);
 	kfree(pre_ct_attr);
@@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
 int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 			struct mlx5_flow_attr *attr,
+			struct mlx5e_tc_mod_hdr_acts *mod_acts,
 			const struct flow_action_entry *act,
 			struct netlink_ext_ack *extack);
 
@@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 static inline int
 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 			struct mlx5_flow_attr *attr,
+			struct mlx5e_tc_mod_hdr_acts *mod_acts,
 			const struct flow_action_entry *act,
 			struct netlink_ext_ack *extack)
 {
@@ -102,6 +102,7 @@ struct mlx5e_tc_flow {
 	refcount_t refcnt;
 	struct rcu_head rcu_head;
 	struct completion init_done;
+	struct completion del_hw_done;
 	int tunnel_id; /* the mapped tunnel id of this flow */
 	struct mlx5_flow_attr *attr;
 };
@@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
 				struct list_head *flow_list,
 				int index)
 {
-	if (IS_ERR(mlx5e_flow_get(flow)))
+	if (IS_ERR(mlx5e_flow_get(flow))) {
+		/* Flow is being deleted concurrently. Wait for it to be
+		 * unoffloaded from hardware, otherwise deleting encap will
+		 * fail.
+		 */
+		wait_for_completion(&flow->del_hw_done);
 		return;
+	}
 	wait_for_completion(&flow->init_done);
 
 	flow->tmp_entry_index = index;
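A hedged sketch of the struct completion handshake this hunk (and the en_tc.c hunks further down) rely on: the deletion path signals with complete_all() once hardware teardown finishes, and a concurrent reader blocks in wait_for_completion() (symbols below are illustrative):

#include <linux/completion.h>

struct demo_flow {
	struct completion del_hw_done;
};

static void demo_flow_init(struct demo_flow *flow)
{
	init_completion(&flow->del_hw_done);
}

/* deletion path: runs after the flow is unoffloaded from hardware */
static void demo_flow_del_hw(struct demo_flow *flow)
{
	/* ... unoffload rules ... */
	complete_all(&flow->del_hw_done);
}

/* concurrent reader: must not touch encap state until teardown finished */
static void demo_flow_wait(struct demo_flow *flow)
{
	wait_for_completion(&flow->del_hw_done);
}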
@@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx {
 	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
 
 	/* resync */
+	spinlock_t lock; /* protects resync fields */
 	struct mlx5e_ktls_rx_resync_ctx resync;
 	struct list_head list;
 };
@@ -386,14 +387,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
 	struct mlx5e_icosq *sq;
 	bool trigger_poll;
 
-	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
-
 	sq = &c->async_icosq;
 	ktls_resync = sq->ktls_resync;
+	trigger_poll = false;
 
 	spin_lock_bh(&ktls_resync->lock);
-	list_add_tail(&priv_rx->list, &ktls_resync->list);
-	trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+	spin_lock_bh(&priv_rx->lock);
+	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+	if (list_empty(&priv_rx->list)) {
+		list_add_tail(&priv_rx->list, &ktls_resync->list);
+		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+	}
+	spin_unlock_bh(&priv_rx->lock);
 	spin_unlock_bh(&ktls_resync->lock);
 
 	if (!trigger_poll)
@@ -617,6 +622,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	if (err)
 		goto err_create_key;
 
+	INIT_LIST_HEAD(&priv_rx->list);
+	spin_lock_init(&priv_rx->lock);
 	priv_rx->crypto_info =
 		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
 
@@ -730,10 +737,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
 		priv_rx = list_first_entry(&local_list,
 					   struct mlx5e_ktls_offload_context_rx,
 					   list);
+		spin_lock(&priv_rx->lock);
 		cseg = post_static_params(sq, priv_rx);
-		if (IS_ERR(cseg))
+		if (IS_ERR(cseg)) {
+			spin_unlock(&priv_rx->lock);
 			break;
-		list_del(&priv_rx->list);
+		}
+		list_del_init(&priv_rx->list);
+		spin_unlock(&priv_rx->lock);
 		db_cseg = cseg;
 	}
 	if (db_cseg)
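list_del_init() versus list_del() is what makes the list_empty(&priv_rx->list) check in the earlier hunk safe: after list_del_init() the node points at itself, so list_empty() on it is well defined, while after plain list_del() the node is poisoned. A hedged sketch using the kernel list API:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(queue);

static void demo(void)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return;
	INIT_LIST_HEAD(&it->node);

	/* enqueue only if not already queued: needs list_empty() to work */
	if (list_empty(&it->node))
		list_add_tail(&it->node, &queue);

	/* dequeue with _init so the next list_empty() check is valid */
	list_del_init(&it->node);
	if (list_empty(&it->node))
		kfree(it);
}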
@@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 		else
 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
 	}
+	complete_all(&flow->del_hw_done);
 
 	if (mlx5_flow_has_geneve_opt(flow))
 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
@@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
 			attr->dest_chain = act->chain_index;
 			break;
 		case FLOW_ACTION_CT:
-			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
+						      &parse_attr->mod_hdr_acts,
+						      act, extack);
 			if (err)
 				return err;
 
@@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
 				return -EOPNOTSUPP;
 			}
-			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+			err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
						      &parse_attr->mod_hdr_acts,
+						      act, extack);
 			if (err)
 				return err;
 
@@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
 	refcount_set(&flow->refcnt, 1);
 	init_completion(&flow->init_done);
+	init_completion(&flow->del_hw_done);
 
 	*__flow = flow;
 	*__parse_attr = parse_attr;
@@ -1305,12 +1305,17 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
  */
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 {
+	bool toggle_lag;
 	int ret;
 
 	if (!mlx5_esw_allowed(esw))
 		return 0;
 
-	mlx5_lag_disable_change(esw->dev);
+	toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+
+	if (toggle_lag)
+		mlx5_lag_disable_change(esw->dev);
+
 	down_write(&esw->mode_lock);
 	if (esw->mode == MLX5_ESWITCH_NONE) {
 		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
@@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
 		esw->esw_funcs.num_vfs = num_vfs;
 	}
 	up_write(&esw->mode_lock);
-	mlx5_lag_enable_change(esw->dev);
+
+	if (toggle_lag)
+		mlx5_lag_enable_change(esw->dev);
+
 	return ret;
 }
 
@@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->enabled_vports = 0;
 	esw->mode = MLX5_ESWITCH_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+	else
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
@@ -1934,7 +1947,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 	return err;
 }
 
-u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
 {
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 
@@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
 	struct mlx5_eswitch *esw;
 
 	esw = dev->priv.eswitch;
-	return mlx5_esw_allowed(esw) ? esw->offloads.encap :
+	return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
 	       DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
@@ -3183,12 +3183,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	u64 mapping_id;
 	int err;
 
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
-	else
-		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-
 	mutex_init(&esw->offloads.termtbl_mutex);
 	mlx5_rdma_enable_roce(esw->dev);
 
@@ -3286,7 +3280,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
 	esw_offloads_metadata_uninit(esw);
 	mlx5_rdma_disable_roce(esw->dev);
 	mutex_destroy(&esw->offloads.termtbl_mutex);
-	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
 
 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -3630,7 +3623,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
 	*encap = esw->offloads.encap;
 unlock:
 	up_write(&esw->mode_lock);
-	return 0;
+	return err;
 }
 
 static bool
@@ -40,7 +40,7 @@
 #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
 /* Max number of counters to query in bulk read is 32K */
 #define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 6
+#define MLX5_SF_NUM_COUNTERS_BULK 8
 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
 #define MLX5_FC_POOL_USED_BUFF_RATIO 10
 
@@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	bool is_bonded, is_in_lag, mode_supported;
 	int bond_status = 0;
 	int num_slaves = 0;
+	int changed = 0;
 	int idx;
 
 	if (!netif_is_lag_master(upper))
@@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	 */
 	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
 
-	if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
-		NL_SET_ERR_MSG_MOD(info->info.extack,
-				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
-		return 0;
-	}
-
 	/* Lag mode must be activebackup or hash. */
 	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
 			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
 
-	if (is_in_lag && !mode_supported)
-		NL_SET_ERR_MSG_MOD(info->info.extack,
-				   "Can't activate LAG offload, TX type isn't supported");
-
 	is_bonded = is_in_lag && mode_supported;
 	if (tracker->is_bonded != is_bonded) {
 		tracker->is_bonded = is_bonded;
-		return 1;
+		changed = 1;
 	}
 
-	return 0;
+	if (!is_in_lag)
+		return changed;
+
+	if (!mlx5_lag_is_ready(ldev))
+		NL_SET_ERR_MSG_MOD(info->info.extack,
+				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
+	else if (!mode_supported)
+		NL_SET_ERR_MSG_MOD(info->info.extack,
+				   "Can't activate LAG offload, TX type isn't supported");
+
+	return changed;
 }
 
 static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
@@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 
 	ldev = container_of(this, struct mlx5_lag, nb);
 
-	if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
-		return NOTIFY_DONE;
-
 	tracker = ldev->tracker;
 
 	switch (event) {
@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
|
||||
|
||||
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
|
||||
u16 vport_number,
|
||||
bool other_vport,
|
||||
struct mlx5dr_cmd_vport_cap *vport_caps)
|
||||
{
|
||||
u16 cmd_vport = vport_number;
|
||||
bool other_vport = true;
|
||||
int ret;
|
||||
|
||||
if (vport_number == MLX5_VPORT_UPLINK) {
|
||||
dr_domain_fill_uplink_caps(dmn, vport_caps);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
|
||||
other_vport = false;
|
||||
cmd_vport = 0;
|
||||
}
|
||||
|
||||
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
|
||||
other_vport,
|
||||
cmd_vport,
|
||||
vport_number,
|
||||
&vport_caps->icm_address_rx,
|
||||
&vport_caps->icm_address_tx);
|
||||
if (ret)
|
||||
@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
|
||||
|
||||
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
|
||||
other_vport,
|
||||
cmd_vport,
|
||||
vport_number,
|
||||
&vport_caps->vport_gvmi);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
|
||||
{
|
||||
return dr_domain_query_vport(dmn,
|
||||
dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
|
||||
false,
|
||||
&dmn->info.caps.vports.esw_manager_caps);
|
||||
}
|
||||
|
||||
static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
|
||||
{
|
||||
dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
|
||||
}
|
||||
|
||||
static struct mlx5dr_cmd_vport_cap *
|
||||
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
|
||||
{
|
||||
@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
|
||||
if (!vport_caps)
|
||||
return NULL;
|
||||
|
||||
ret = dr_domain_query_vport(dmn, vport, vport_caps);
|
||||
ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
|
||||
if (ret) {
|
||||
kvfree(vport_caps);
|
||||
return NULL;
|
||||
@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
|
||||
return vport_caps;
|
||||
}
|
||||
|
||||
static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
|
||||
{
|
||||
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
|
||||
|
||||
return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
|
||||
(!caps->is_ecpf && vport == 0);
|
||||
}
|
||||
|
||||
struct mlx5dr_cmd_vport_cap *
|
||||
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
|
||||
{
|
||||
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
|
||||
struct mlx5dr_cmd_vport_cap *vport_caps;
|
||||
|
||||
if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
|
||||
(!caps->is_ecpf && vport == 0))
|
||||
if (dr_domain_is_esw_mgr_vport(dmn, vport))
|
||||
return &caps->vports.esw_manager_caps;
|
||||
|
||||
if (vport == MLX5_VPORT_UPLINK)
|
||||
return &caps->vports.uplink_caps;
|
||||
|
||||
vport_load:
|
||||
vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
|
||||
if (vport_caps)
|
||||
@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
|
||||
}
|
||||
}
|
||||
|
||||
static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
|
||||
{
|
||||
struct mlx5dr_cmd_vport_cap *vport_caps;
|
||||
|
||||
vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
|
||||
if (!vport_caps)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
|
||||
struct mlx5dr_domain *dmn)
|
||||
{
|
||||
@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
|
||||
goto free_vports_caps_xa;
|
||||
}
|
||||
|
||||
ret = dr_domain_query_uplink(dmn);
|
||||
if (ret) {
|
||||
mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
|
||||
goto free_vports_caps_xa;
|
||||
}
|
||||
dr_domain_query_uplink(dmn);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
|
||||
|
||||
/* Check that all mask data was consumed */
|
||||
for (i = 0; i < consumed_mask.match_sz; i++) {
|
||||
if (consumed_mask.match_buf[i]) {
|
||||
mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
|
||||
ret = -EOPNOTSUPP;
|
||||
goto free_consumed_mask;
|
||||
}
|
||||
if (!((u8 *)consumed_mask.match_buf)[i])
|
||||
continue;
|
||||
|
||||
mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
|
||||
ret = -EOPNOTSUPP;
|
||||
goto free_consumed_mask;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
|
@ -764,6 +764,7 @@ struct mlx5dr_roce_cap {
|
||||
|
||||
struct mlx5dr_vports {
|
||||
struct mlx5dr_cmd_vport_cap esw_manager_caps;
|
||||
struct mlx5dr_cmd_vport_cap uplink_caps;
|
||||
struct xarray vports_caps_xa;
|
||||
};
|
||||
|
||||
|
@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
|
||||
/* get MAC address from EEPROM */
|
||||
for (i = 0; i < 3; i++)
|
||||
addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
|
||||
eth_hw_addr_set(net_dev, (u8 *)addr);
|
||||
eth_hw_addr_set(net_dev, (u8 *)addr);
|
||||
|
||||
rc = 1;
|
||||
break;
|
||||
|
@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev)
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
|
||||
socfpga_dwmac_resume);
|
||||
static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct net_device *ndev = dev_get_drvdata(dev);
|
||||
struct stmmac_priv *priv = netdev_priv(ndev);
|
||||
|
||||
stmmac_bus_clks_config(priv, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
|
||||
{
|
||||
struct net_device *ndev = dev_get_drvdata(dev);
|
||||
struct stmmac_priv *priv = netdev_priv(ndev);
|
||||
|
||||
return stmmac_bus_clks_config(priv, true);
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
|
||||
SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
|
||||
};
|
||||
|
||||
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
|
||||
.set_phy_mode = socfpga_gen5_set_phy_mode,
|
||||
|
@ -511,6 +511,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
|
||||
{
|
||||
/* Correct the clk domain crossing(CDC) error */
|
||||
if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
|
||||
return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
|
||||
* @priv: driver private structure
|
||||
* @p : descriptor pointer
|
||||
@ -524,7 +532,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
|
||||
{
|
||||
struct skb_shared_hwtstamps shhwtstamp;
|
||||
bool found = false;
|
||||
s64 adjust = 0;
|
||||
u64 ns = 0;
|
||||
|
||||
if (!priv->hwts_tx_en)
|
||||
@ -543,12 +550,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
|
||||
}
|
||||
|
||||
if (found) {
|
||||
/* Correct the clk domain crossing(CDC) error */
|
||||
if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
|
||||
adjust += -(2 * (NSEC_PER_SEC /
|
||||
priv->plat->clk_ptp_rate));
|
||||
ns += adjust;
|
||||
}
|
||||
ns -= stmmac_cdc_adjust(priv);
|
||||
|
||||
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
|
||||
shhwtstamp.hwtstamp = ns_to_ktime(ns);
|
||||
@ -573,7 +575,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
|
||||
{
|
||||
struct skb_shared_hwtstamps *shhwtstamp = NULL;
|
||||
struct dma_desc *desc = p;
|
||||
u64 adjust = 0;
|
||||
u64 ns = 0;
|
||||
|
||||
if (!priv->hwts_rx_en)
|
||||
@ -586,11 +587,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
|
||||
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
|
||||
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
|
||||
|
||||
/* Correct the clk domain crossing(CDC) error */
|
||||
if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
|
||||
adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
|
||||
ns -= adjust;
|
||||
}
|
||||
ns -= stmmac_cdc_adjust(priv);
|
||||
|
||||
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
|
||||
shhwtstamp = skb_hwtstamps(skb);
|
||||
|
@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev)
|
||||
{
|
||||
/* Finish setting up the DEVICE info. */
|
||||
dev->netdev_ops = &sp_netdev_ops;
|
||||
dev->needs_free_netdev = true;
|
||||
dev->mtu = SIXP_MTU;
|
||||
dev->hard_header_len = AX25_MAX_HEADER_LEN;
|
||||
dev->header_ops = &ax25_header_ops;
|
||||
|
@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
|
||||
u32 offset;
|
||||
u32 val;
|
||||
|
||||
/* This should only be changed when HOL_BLOCK_EN is disabled */
|
||||
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
|
||||
val = hol_block_timer_val(ipa, microseconds);
|
||||
iowrite32(val, ipa->reg_virt + offset);
|
||||
@ -868,6 +869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
|
||||
val = enable ? HOL_BLOCK_EN_FMASK : 0;
|
||||
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
|
||||
iowrite32(val, endpoint->ipa->reg_virt + offset);
|
||||
/* When enabling, the register must be written twice for IPA v4.5+ */
|
||||
if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
|
||||
iowrite32(val, endpoint->ipa->reg_virt + offset);
|
||||
}
|
||||
|
||||
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
|
||||
@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
|
||||
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
|
||||
continue;
|
||||
|
||||
ipa_endpoint_init_hol_block_enable(endpoint, false);
|
||||
ipa_endpoint_init_hol_block_timer(endpoint, 0);
|
||||
ipa_endpoint_init_hol_block_enable(endpoint, true);
|
||||
}
|
||||
|
@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
|
||||
return false;
|
||||
}
|
||||
|
||||
group_count = data->rsrc_group_src_count;
|
||||
group_count = data->rsrc_group_dst_count;
|
||||
if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
|
||||
return false;
|
||||
|
||||
|
@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct tun_struct *tun = netdev_priv(dev);
|
||||
int txq = skb->queue_mapping;
|
||||
struct netdev_queue *queue;
|
||||
struct tun_file *tfile;
|
||||
int len = skb->len;
|
||||
|
||||
@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (ptr_ring_produce(&tfile->tx_ring, skb))
|
||||
goto drop;
|
||||
|
||||
/* NETIF_F_LLTX requires to do our own update of trans_start */
|
||||
queue = netdev_get_tx_queue(dev, txq);
|
||||
queue->trans_start = jiffies;
|
||||
|
||||
/* Notify and wake up reader process */
|
||||
if (tfile->flags & TUN_FASYNC)
|
||||
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
|
||||
|
@ -9603,12 +9603,9 @@ static int rtl8152_probe(struct usb_interface *intf,
|
||||
netdev->hw_features &= ~NETIF_F_RXCSUM;
|
||||
}
|
||||
|
||||
if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
|
||||
switch (le16_to_cpu(udev->descriptor.idProduct)) {
|
||||
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
|
||||
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
|
||||
tp->lenovo_macpassthru = 1;
|
||||
}
|
||||
if (udev->parent &&
|
||||
le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
|
||||
tp->lenovo_macpassthru = 1;
|
||||
}
|
||||
|
||||
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
|
||||
|
@ -1699,12 +1699,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel)
|
||||
|
||||
/* PTP Hardware Clock interface */
|
||||
|
||||
/**
|
||||
/*
|
||||
* Maximum absolute value for write phase offset in picoseconds
|
||||
*
|
||||
* @channel: channel
|
||||
* @delta_ns: delta in nanoseconds
|
||||
*
|
||||
* Destination signed register is 32-bit register in resolution of 50ps
|
||||
*
|
||||
* 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
|
||||
|
@ -1304,10 +1304,11 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
|
||||
if (!ext)
|
||||
return -ENOMEM;
|
||||
|
||||
err = -EINVAL;
|
||||
ext->mem = ptp_ocp_get_mem(bp, r);
|
||||
if (!ext->mem)
|
||||
if (IS_ERR(ext->mem)) {
|
||||
err = PTR_ERR(ext->mem);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ext->bp = bp;
|
||||
ext->info = r->extra;
|
||||
@ -1371,8 +1372,8 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
|
||||
void __iomem *mem;
|
||||
|
||||
mem = ptp_ocp_get_mem(bp, r);
|
||||
if (!mem)
|
||||
return -EINVAL;
|
||||
if (IS_ERR(mem))
|
||||
return PTR_ERR(mem);
|
||||
|
||||
bp_assign_entry(bp, r, mem);
|
||||
|
||||
|
@ -193,7 +193,7 @@ struct bpf_map {
|
||||
atomic64_t usercnt;
|
||||
struct work_struct work;
|
||||
struct mutex freeze_mutex;
|
||||
u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
|
||||
atomic64_t writecnt;
|
||||
};
|
||||
|
||||
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
|
||||
@ -1419,6 +1419,7 @@ void bpf_map_put(struct bpf_map *map);
|
||||
void *bpf_map_area_alloc(u64 size, int numa_node);
|
||||
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
|
||||
void bpf_map_area_free(void *base);
|
||||
bool bpf_map_write_active(const struct bpf_map *map);
|
||||
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
|
||||
int generic_map_lookup_batch(struct bpf_map *map,
|
||||
const union bpf_attr *attr,
|
||||
|
@ -145,13 +145,13 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
|
||||
GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
|
||||
ESW_TUN_OPTS_OFFSET + 1)
|
||||
|
||||
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
|
||||
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
|
||||
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
|
||||
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
|
||||
|
||||
#else /* CONFIG_MLX5_ESWITCH */
|
||||
|
||||
static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
|
||||
static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
|
||||
{
|
||||
return MLX5_ESWITCH_NONE;
|
||||
}
|
||||
|
@ -105,7 +105,18 @@ struct page {
|
||||
struct page_pool *pp;
|
||||
unsigned long _pp_mapping_pad;
|
||||
unsigned long dma_addr;
|
||||
atomic_long_t pp_frag_count;
|
||||
union {
|
||||
/**
|
||||
* dma_addr_upper: might require a 64-bit
|
||||
* value on 32-bit architectures.
|
||||
*/
|
||||
unsigned long dma_addr_upper;
|
||||
/**
|
||||
* For frag page support, not supported in
|
||||
* 32-bit architectures with 64-bit DMA.
|
||||
*/
|
||||
atomic_long_t pp_frag_count;
|
||||
};
|
||||
};
|
||||
struct { /* slab, slob and slub */
|
||||
union {
|
||||
|
@ -4226,7 +4226,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
|
||||
if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
|
||||
__skb_checksum_complete(skb);
|
||||
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
|
||||
}
|
||||
|
@ -120,10 +120,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
|
||||
|
||||
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
|
||||
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
|
||||
unsigned int nh_off = p_off;
|
||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||
|
||||
/* UFO may not include transport header in gso_size. */
|
||||
if (gso_type & SKB_GSO_UDP)
|
||||
nh_off -= thlen;
|
||||
|
||||
/* Too small packets are not really GSO ones. */
|
||||
if (skb->len - p_off > gso_size) {
|
||||
if (skb->len - nh_off > gso_size) {
|
||||
shinfo->gso_size = gso_size;
|
||||
shinfo->gso_type = gso_type;
|
||||
|
||||
|
@ -30,6 +30,7 @@ enum nci_flag {
|
||||
NCI_UP,
|
||||
NCI_DATA_EXCHANGE,
|
||||
NCI_DATA_EXCHANGE_TO,
|
||||
NCI_UNREG,
|
||||
};
|
||||
|
||||
/* NCI device states */
|
||||
|
@ -216,14 +216,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
|
||||
page_pool_put_full_page(pool, page, true);
|
||||
}
|
||||
|
||||
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
|
||||
(sizeof(dma_addr_t) > sizeof(unsigned long))
|
||||
|
||||
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
|
||||
{
|
||||
return page->dma_addr;
|
||||
dma_addr_t ret = page->dma_addr;
|
||||
|
||||
if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
|
||||
ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
|
||||
{
|
||||
page->dma_addr = addr;
|
||||
if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
|
||||
page->dma_addr_upper = upper_32_bits(addr);
|
||||
}
|
||||
|
||||
static inline void page_pool_set_frag_count(struct page *page, long nr)
|
||||
|
@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_sysctl_get_new_value_proto;
|
||||
case BPF_FUNC_sysctl_set_new_value:
|
||||
return &bpf_sysctl_set_new_value_proto;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return cgroup_base_func_proto(func_id, prog);
|
||||
}
|
||||
|
@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
|
||||
return &bpf_ktime_get_ns_proto;
|
||||
case BPF_FUNC_ktime_get_boot_ns:
|
||||
return &bpf_ktime_get_boot_ns_proto;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
case BPF_FUNC_ringbuf_output:
|
||||
return &bpf_ringbuf_output_proto;
|
||||
case BPF_FUNC_ringbuf_reserve:
|
||||
|
@ -132,6 +132,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
|
||||
return map;
|
||||
}
|
||||
|
||||
static void bpf_map_write_active_inc(struct bpf_map *map)
|
||||
{
|
||||
atomic64_inc(&map->writecnt);
|
||||
}
|
||||
|
||||
static void bpf_map_write_active_dec(struct bpf_map *map)
|
||||
{
|
||||
atomic64_dec(&map->writecnt);
|
||||
}
|
||||
|
||||
bool bpf_map_write_active(const struct bpf_map *map)
|
||||
{
|
||||
return atomic64_read(&map->writecnt) != 0;
|
||||
}
|
||||
|
||||
static u32 bpf_map_value_size(const struct bpf_map *map)
|
||||
{
|
||||
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
|
||||
@ -601,11 +616,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
|
||||
{
|
||||
struct bpf_map *map = vma->vm_file->private_data;
|
||||
|
||||
if (vma->vm_flags & VM_MAYWRITE) {
|
||||
mutex_lock(&map->freeze_mutex);
|
||||
map->writecnt++;
|
||||
mutex_unlock(&map->freeze_mutex);
|
||||
}
|
||||
if (vma->vm_flags & VM_MAYWRITE)
|
||||
bpf_map_write_active_inc(map);
|
||||
}
|
||||
|
||||
/* called for all unmapped memory region (including initial) */
|
||||
@ -613,11 +625,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
|
||||
{
|
||||
struct bpf_map *map = vma->vm_file->private_data;
|
||||
|
||||
if (vma->vm_flags & VM_MAYWRITE) {
|
||||
mutex_lock(&map->freeze_mutex);
|
||||
map->writecnt--;
|
||||
mutex_unlock(&map->freeze_mutex);
|
||||
}
|
||||
if (vma->vm_flags & VM_MAYWRITE)
|
||||
bpf_map_write_active_dec(map);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct bpf_map_default_vmops = {
|
||||
@ -668,7 +677,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
goto out;
|
||||
|
||||
if (vma->vm_flags & VM_MAYWRITE)
|
||||
map->writecnt++;
|
||||
bpf_map_write_active_inc(map);
|
||||
out:
|
||||
mutex_unlock(&map->freeze_mutex);
|
||||
return err;
|
||||
@ -1139,6 +1148,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
bpf_map_write_active_inc(map);
|
||||
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
|
||||
err = -EPERM;
|
||||
goto err_put;
|
||||
@ -1174,6 +1184,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
|
||||
free_key:
|
||||
kvfree(key);
|
||||
err_put:
|
||||
bpf_map_write_active_dec(map);
|
||||
fdput(f);
|
||||
return err;
|
||||
}
|
||||
@ -1196,6 +1207,7 @@ static int map_delete_elem(union bpf_attr *attr)
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
bpf_map_write_active_inc(map);
|
||||
if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
|
||||
err = -EPERM;
|
||||
goto err_put;
|
||||
@ -1226,6 +1238,7 @@ static int map_delete_elem(union bpf_attr *attr)
|
||||
out:
|
||||
kvfree(key);
|
||||
err_put:
|
||||
bpf_map_write_active_dec(map);
|
||||
fdput(f);
|
||||
return err;
|
||||
}
|
||||
@ -1533,6 +1546,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
bpf_map_write_active_inc(map);
|
||||
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
|
||||
!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
|
||||
err = -EPERM;
|
||||
@ -1597,6 +1611,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
|
||||
free_key:
|
||||
kvfree(key);
|
||||
err_put:
|
||||
bpf_map_write_active_dec(map);
|
||||
fdput(f);
|
||||
return err;
|
||||
}
|
||||
@ -1624,8 +1639,7 @@ static int map_freeze(const union bpf_attr *attr)
|
||||
}
|
||||
|
||||
mutex_lock(&map->freeze_mutex);
|
||||
|
||||
if (map->writecnt) {
|
||||
if (bpf_map_write_active(map)) {
|
||||
err = -EBUSY;
|
||||
goto err_put;
|
||||
}
|
||||
@ -4171,6 +4185,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
|
||||
union bpf_attr __user *uattr,
|
||||
int cmd)
|
||||
{
|
||||
bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
|
||||
cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
|
||||
bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
|
||||
struct bpf_map *map;
|
||||
int err, ufd;
|
||||
struct fd f;
|
||||
@ -4183,16 +4200,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
|
||||
map = __bpf_map_get(f);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
|
||||
if ((cmd == BPF_MAP_LOOKUP_BATCH ||
|
||||
cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
|
||||
!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
|
||||
if (has_write)
|
||||
bpf_map_write_active_inc(map);
|
||||
if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
|
||||
err = -EPERM;
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
if (cmd != BPF_MAP_LOOKUP_BATCH &&
|
||||
!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
|
||||
if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
|
||||
err = -EPERM;
|
||||
goto err_put;
|
||||
}
|
||||
@ -4205,8 +4219,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
|
||||
BPF_DO_BATCH(map->ops->map_update_batch);
|
||||
else
|
||||
BPF_DO_BATCH(map->ops->map_delete_batch);
|
||||
|
||||
err_put:
|
||||
if (has_write)
|
||||
bpf_map_write_active_dec(map);
|
||||
fdput(f);
|
||||
return err;
|
||||
}
|
||||
|
@ -1151,7 +1151,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
|
||||
/* transfer reg's id which is unique for every map_lookup_elem
|
||||
* as UID of the inner map.
|
||||
*/
|
||||
reg->map_uid = reg->id;
|
||||
if (map_value_has_timer(map->inner_map_meta))
|
||||
reg->map_uid = reg->id;
|
||||
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
|
||||
reg->type = PTR_TO_XDP_SOCK;
|
||||
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
|
||||
@ -4055,7 +4056,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
|
||||
|
||||
static bool bpf_map_is_rdonly(const struct bpf_map *map)
|
||||
{
|
||||
return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
|
||||
/* A map is considered read-only if the following condition are true:
|
||||
*
|
||||
* 1) BPF program side cannot change any of the map content. The
|
||||
* BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
|
||||
* and was set at map creation time.
|
||||
* 2) The map value(s) have been initialized from user space by a
|
||||
* loader and then "frozen", such that no new map update/delete
|
||||
* operations from syscall side are possible for the rest of
|
||||
* the map's lifetime from that point onwards.
|
||||
* 3) Any parallel/pending map update/delete operations from syscall
|
||||
* side have been completed. Only after that point, it's safe to
|
||||
* assume that map value(s) are immutable.
|
||||
*/
|
||||
return (map->map_flags & BPF_F_RDONLY_PROG) &&
|
||||
READ_ONCE(map->frozen) &&
|
||||
!bpf_map_write_active(map);
|
||||
}
|
||||
|
||||
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
|
||||
@ -11631,6 +11647,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
|
||||
}
|
||||
}
|
||||
|
||||
if (map_value_has_timer(map)) {
|
||||
if (is_tracing_prog_type(prog_type)) {
|
||||
verbose(env, "tracing progs cannot use bpf_timer yet\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
|
||||
!bpf_offload_prog_map_match(prog, map)) {
|
||||
verbose(env, "offload device mismatch between prog and map\n");
|
||||
|
@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
return &bpf_ktime_get_ns_proto;
|
||||
case BPF_FUNC_ktime_get_boot_ns:
|
||||
return &bpf_ktime_get_boot_ns_proto;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
case BPF_FUNC_tail_call:
|
||||
return &bpf_tail_call_proto;
|
||||
case BPF_FUNC_get_current_pid_tgid:
|
||||
|
@ -4229,7 +4229,9 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
|
||||
WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
|
||||
cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
|
||||
cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
|
||||
WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
|
||||
|
||||
if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
|
||||
return;
|
||||
|
||||
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
||||
if (!msg)
|
||||
|
@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
||||
#endif
|
||||
case BPF_FUNC_sk_storage_get:
|
||||
return &bpf_sk_storage_get_cg_sock_proto;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
}
|
||||
@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
|
||||
return &sk_reuseport_load_bytes_relative_proto;
|
||||
case BPF_FUNC_get_socket_cookie:
|
||||
return &bpf_get_socket_ptr_cookie_proto;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
}
|
||||
@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
|
||||
case BPF_FUNC_skc_to_unix_sock:
|
||||
func = &bpf_skc_to_unix_sock_proto;
|
||||
break;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
}
|
||||
|
@ -49,12 +49,6 @@ static int page_pool_init(struct page_pool *pool,
|
||||
* which is the XDP_TX use-case.
|
||||
*/
|
||||
if (pool->p.flags & PP_FLAG_DMA_MAP) {
|
||||
/* DMA-mapping is not supported on 32-bit systems with
|
||||
* 64-bit DMA mapping.
|
||||
*/
|
||||
if (sizeof(dma_addr_t) > sizeof(unsigned long))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
|
||||
(pool->p.dma_dir != DMA_BIDIRECTIONAL))
|
||||
return -EINVAL;
|
||||
@ -75,6 +69,10 @@ static int page_pool_init(struct page_pool *pool,
|
||||
*/
|
||||
}
|
||||
|
||||
if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
|
||||
pool->p.flags & PP_FLAG_PAGE_FRAG)
|
||||
return -EINVAL;
|
||||
|
||||
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -2124,8 +2124,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
|
||||
newsk->sk_prot_creator = prot;
|
||||
|
||||
/* SANITY */
|
||||
if (likely(newsk->sk_net_refcnt))
|
||||
if (likely(newsk->sk_net_refcnt)) {
|
||||
get_net(sock_net(newsk));
|
||||
sock_inuse_add(sock_net(newsk), 1);
|
||||
}
|
||||
sk_node_init(&newsk->sk_node);
|
||||
sock_lock_init(newsk);
|
||||
bh_lock_sock(newsk);
|
||||
@ -2197,8 +2199,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
|
||||
newsk->sk_err_soft = 0;
|
||||
newsk->sk_priority = 0;
|
||||
newsk->sk_incoming_cpu = raw_smp_processor_id();
|
||||
if (likely(newsk->sk_net_refcnt))
|
||||
sock_inuse_add(sock_net(newsk), 1);
|
||||
|
||||
/* Before updating sk_refcnt, we must commit prior changes to memory
|
||||
* (Documentation/RCU/rculist_nulls.rst for details)
|
||||
|
@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
|
||||
offsetof(struct tcp_congestion_ops, release))
|
||||
return &bpf_sk_getsockopt_proto;
|
||||
return NULL;
|
||||
case BPF_FUNC_ktime_get_coarse_ns:
|
||||
return &bpf_ktime_get_coarse_ns_proto;
|
||||
default:
|
||||
return bpf_base_func_proto(func_id);
|
||||
}
|
||||
|
@ -2591,7 +2591,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
|
||||
free:
|
||||
kfree(t);
|
||||
out:
|
||||
return -ENOBUFS;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void __devinet_sysctl_unregister(struct net *net,
|
||||
|
@ -1758,6 +1758,9 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
|
||||
{
|
||||
skb_frag_t *frag;
|
||||
|
||||
if (unlikely(offset_skb >= skb->len))
|
||||
return NULL;
|
||||
|
||||
offset_skb -= skb_headlen(skb);
|
||||
if ((int)offset_skb < 0 || skb_has_frag_list(skb))
|
||||
return NULL;
|
||||
|
@ -1807,6 +1807,17 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
|
||||
skb = skb_recv_udp(sk, 0, 1, &err);
|
||||
if (!skb)
|
||||
return err;
|
||||
|
||||
if (udp_lib_checksum_complete(skb)) {
|
||||
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
|
||||
IS_UDPLITE(sk));
|
||||
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
|
||||
IS_UDPLITE(sk));
|
||||
atomic_inc(&sk->sk_drops);
|
||||
kfree_skb(skb);
|
||||
continue;
|
||||
}
|
||||
|
||||
used = recv_actor(desc, skb, 0, skb->len);
|
||||
if (used <= 0) {
|
||||
if (!copied)
|
||||
|
@ -808,6 +808,12 @@ int esp6_input_done2(struct sk_buff *skb, int err)
|
||||
struct tcphdr *th;
|
||||
|
||||
offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
|
||||
|
||||
if (offset < 0) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
uh = (void *)(skb->data + offset);
|
||||
th = (void *)(skb->data + offset);
|
||||
hdr_len += offset;
|
||||
|
@ -80,7 +80,8 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
|
||||
}
|
||||
|
||||
/* also validate MU-MIMO change */
|
||||
monitor_sdata = rtnl_dereference(local->monitor_sdata);
|
||||
monitor_sdata = wiphy_dereference(local->hw.wiphy,
|
||||
local->monitor_sdata);
|
||||
|
||||
if (!monitor_sdata &&
|
||||
(params->vht_mumimo_groups || params->vht_mumimo_follow_addr))
|
||||
@ -840,7 +841,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
|
||||
|
||||
mutex_lock(&local->mtx);
|
||||
if (local->use_chanctx) {
|
||||
sdata = rtnl_dereference(local->monitor_sdata);
|
||||
sdata = wiphy_dereference(local->hw.wiphy,
|
||||
local->monitor_sdata);
|
||||
if (sdata) {
|
||||
ieee80211_vif_release_channel(sdata);
|
||||
ret = ieee80211_vif_use_channel(sdata, chandef,
|
||||
@ -2707,7 +2709,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
|
||||
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
|
||||
|
||||
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
|
||||
sdata = rtnl_dereference(local->monitor_sdata);
|
||||
sdata = wiphy_dereference(local->hw.wiphy,
|
||||
local->monitor_sdata);
|
||||
if (!sdata)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@ -2767,7 +2770,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
|
||||
mutex_unlock(&local->iflist_mtx);
|
||||
|
||||
if (has_monitor) {
|
||||
sdata = rtnl_dereference(local->monitor_sdata);
|
||||
sdata = wiphy_dereference(local->hw.wiphy,
|
||||
local->monitor_sdata);
|
||||
if (sdata) {
|
||||
sdata->user_power_level = local->user_power_level;
|
||||
if (txp_type != sdata->vif.bss_conf.txpower_type)
|
||||
|
@ -588,7 +588,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
|
||||
*/
|
||||
if (local->suspended) {
|
||||
WARN_ON(local->wowlan);
|
||||
WARN_ON(rtnl_dereference(local->monitor_sdata));
|
||||
WARN_ON(rcu_access_pointer(local->monitor_sdata));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -961,6 +961,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
|
||||
return 0;
|
||||
|
||||
ASSERT_RTNL();
|
||||
lockdep_assert_wiphy(local->hw.wiphy);
|
||||
|
||||
if (local->monitor_sdata)
|
||||
return 0;
|
||||
@ -1028,6 +1029,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
|
||||
return;
|
||||
|
||||
ASSERT_RTNL();
|
||||
lockdep_assert_wiphy(local->hw.wiphy);
|
||||
|
||||
mutex_lock(&local->iflist_mtx);
|
||||
|
||||
|
@ -72,19 +72,19 @@ static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
|
||||
ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, int bytes)
|
||||
{
|
||||
#ifdef CONFIG_MAC80211_LEDS
|
||||
if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
|
||||
if (atomic_read(&local->tpt_led_active))
|
||||
local->tpt_led_trigger->tx_bytes += bytes;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void
|
||||
ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
|
||||
ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, int bytes)
|
||||
{
|
||||
#ifdef CONFIG_MAC80211_LEDS
|
||||
if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
|
||||
if (atomic_read(&local->tpt_led_active))
|
||||
local->tpt_led_trigger->rx_bytes += bytes;
|
||||
#endif
|
||||
}
|
||||
|
@ -364,7 +364,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
|
||||
* the compiler to think we have walked past the end of the
|
||||
* struct member.
|
||||
*/
|
||||
pos = (void *)&rthdr->it_optional[it_present - rthdr->it_optional];
|
||||
pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];
|
||||
|
||||
/* the order of the following fields is important */
|
||||
|
||||
@ -1952,7 +1952,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
|
||||
int keyid = rx->sta->ptk_idx;
|
||||
sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
|
||||
|
||||
if (ieee80211_has_protected(fc)) {
|
||||
if (ieee80211_has_protected(fc) &&
|
||||
!(status->flag & RX_FLAG_IV_STRIPPED)) {
|
||||
cs = rx->sta->cipher_scheme;
|
||||
keyid = ieee80211_get_keyid(rx->skb, cs);
|
||||
|
||||
@ -4863,6 +4864,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
|
||||
struct ieee80211_rate *rate = NULL;
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
|
||||
WARN_ON_ONCE(softirq_count() == 0);
|
||||
|
||||
@ -4959,9 +4961,9 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
|
||||
if (!(status->flag & RX_FLAG_8023))
|
||||
skb = ieee80211_rx_monitor(local, skb, rate);
|
||||
if (skb) {
|
||||
ieee80211_tpt_led_trig_rx(local,
|
||||
((struct ieee80211_hdr *)skb->data)->frame_control,
|
||||
skb->len);
|
||||
if ((status->flag & RX_FLAG_8023) ||
|
||||
ieee80211_is_data_present(hdr->frame_control))
|
||||
ieee80211_tpt_led_trig_rx(local, skb->len);
|
||||
|
||||
if (status->flag & RX_FLAG_8023)
|
||||
__ieee80211_rx_handle_8023(hw, pubsta, skb, list);
|
||||
|
@ -1721,21 +1721,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
|
||||
* Returns false if the frame couldn't be transmitted but was queued instead.
|
||||
*/
|
||||
static bool __ieee80211_tx(struct ieee80211_local *local,
|
||||
struct sk_buff_head *skbs, int led_len,
|
||||
struct sta_info *sta, bool txpending)
|
||||
struct sk_buff_head *skbs, struct sta_info *sta,
|
||||
bool txpending)
|
||||
{
|
||||
struct ieee80211_tx_info *info;
|
||||
struct ieee80211_sub_if_data *sdata;
|
||||
struct ieee80211_vif *vif;
|
||||
struct sk_buff *skb;
|
||||
bool result;
|
||||
__le16 fc;
|
||||
|
||||
if (WARN_ON(skb_queue_empty(skbs)))
|
||||
return true;
|
||||
|
||||
skb = skb_peek(skbs);
|
||||
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
sdata = vif_to_sdata(info->control.vif);
|
||||
if (sta && !sta->uploaded)
|
||||
@ -1769,8 +1767,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
|
||||
|
||||
result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);
|
||||
|
||||
ieee80211_tpt_led_trig_tx(local, fc, led_len);
|
||||
|
||||
WARN_ON_ONCE(!skb_queue_empty(skbs));
|
||||
|
||||
return result;
|
||||
@ -1920,7 +1916,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
|
||||
ieee80211_tx_result res_prepare;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
bool result = true;
|
||||
int led_len;
|
||||
|
||||
if (unlikely(skb->len < 10)) {
|
||||
dev_kfree_skb(skb);
|
||||
@ -1928,7 +1923,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
|
||||
}
|
||||
|
||||
/* initialises tx */
|
||||
led_len = skb->len;
|
||||
res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
|
||||
|
||||
if (unlikely(res_prepare == TX_DROP)) {
|
||||
@ -1951,8 +1945,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
|
||||
return true;
|
||||
|
||||
if (!invoke_tx_handlers_late(&tx))
|
||||
result = __ieee80211_tx(local, &tx.skbs, led_len,
|
||||
tx.sta, txpending);
|
||||
result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending);
|
||||
|
||||
return result;
|
||||
}
|
||||
@ -4175,6 +4168,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
|
||||
struct ieee80211_local *local = sdata->local;
|
||||
struct sta_info *sta;
|
||||
struct sk_buff *next;
|
||||
int len = skb->len;
|
||||
|
||||
if (unlikely(skb->len < ETH_HLEN)) {
|
||||
kfree_skb(skb);
|
||||
@ -4221,10 +4215,8 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
|
||||
}
|
||||
} else {
|
||||
/* we cannot process non-linear frames on this path */
|
||||
if (skb_linearize(skb)) {
|
||||
kfree_skb(skb);
|
||||
goto out;
|
||||
}
|
||||
if (skb_linearize(skb))
|
||||
goto out_free;
|
||||
|
||||
/* the frame could be fragmented, software-encrypted, and other
|
||||
* things so we cannot really handle checksum offload with it -
|
||||
@ -4258,7 +4250,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
|
||||
goto out;
|
||||
out_free:
|
||||
kfree_skb(skb);
|
||||
len = 0;
|
||||
out:
|
||||
if (len)
|
||||
ieee80211_tpt_led_trig_tx(local, len);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
@ -4396,8 +4391,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
|
||||
}
|
||||
|
||||
static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
|
||||
struct sk_buff *skb, int led_len,
|
||||
struct sta_info *sta,
|
||||
struct sk_buff *skb, struct sta_info *sta,
|
||||
bool txpending)
|
||||
{
|
||||
struct ieee80211_local *local = sdata->local;
|
||||
@ -4410,6 +4404,8 @@ static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
|
||||
if (sta)
|
||||
sk_pacing_shift_update(skb->sk, local->hw.tx_sk_pacing_shift);
|
||||
|
||||
ieee80211_tpt_led_trig_tx(local, skb->len);
|
||||
|
||||
if (ieee80211_queue_skb(local, sdata, sta, skb))
|
||||
return true;
|
||||
|
||||
@ -4498,7 +4494,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
|
||||
if (key)
|
||||
info->control.hw_key = &key->conf;
|
||||
|
||||
ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
|
||||
ieee80211_tx_8023(sdata, skb, sta, false);
|
||||
|
||||
return;
|
||||
|
||||
@ -4637,7 +4633,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
|
||||
if (IS_ERR(sta) || (sta && !sta->uploaded))
|
||||
sta = NULL;
|
||||
|
||||
result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
|
||||
result = ieee80211_tx_8023(sdata, skb, sta, true);
|
||||
} else {
|
||||
struct sk_buff_head skbs;
|
||||
|
||||
@ -4647,7 +4643,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
sta = sta_info_get(sdata, hdr->addr1);
|
||||
|
||||
result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
|
||||
result = __ieee80211_tx(local, &skbs, sta, true);
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@ -796,7 +796,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
|
||||
|
||||
sdata = rcu_dereference_check(local->monitor_sdata,
|
||||
lockdep_is_held(&local->iflist_mtx) ||
|
||||
lockdep_rtnl_is_held());
|
||||
lockdep_is_held(&local->hw.wiphy->mtx));
|
||||
if (sdata &&
|
||||
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
|
||||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
|
||||
@ -2381,7 +2381,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
||||
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
|
||||
|
||||
/* add interfaces */
|
||||
sdata = rtnl_dereference(local->monitor_sdata);
|
||||
sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
|
||||
if (sdata) {
|
||||
/* in HW restart it exists already */
|
||||
WARN_ON(local->resuming);
|
||||
@ -2426,7 +2426,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
||||
WARN_ON(drv_add_chanctx(local, ctx));
|
||||
mutex_unlock(&local->chanctx_mtx);
|
||||
|
||||
sdata = rtnl_dereference(local->monitor_sdata);
|
||||
sdata = wiphy_dereference(local->hw.wiphy,
|
||||
local->monitor_sdata);
|
||||
if (sdata && ieee80211_sdata_running(sdata))
|
||||
ieee80211_assign_chanctx(local, sdata);
|
||||
}
|
||||
|
@ -143,7 +143,6 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
|
||||
u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
|
||||
struct sta_info *sta, struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct mac80211_qos_map *qos_map;
|
||||
bool qos;
|
||||
|
||||
@ -156,7 +155,7 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
|
||||
else
|
||||
qos = false;
|
||||
|
||||
if (!qos || (info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
|
||||
if (!qos) {
|
||||
skb->priority = 0; /* required for correct WPA/11i MIC */
|
||||
return IEEE80211_AC_BE;
|
||||
}
|
||||
|
@ -94,13 +94,13 @@ int nfc_dev_up(struct nfc_dev *dev)
|
||||
|
||||
device_lock(&dev->dev);
|
||||
|
||||
if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
|
||||
rc = -ERFKILL;
|
||||
if (!device_is_registered(&dev->dev)) {
|
||||
rc = -ENODEV;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (!device_is_registered(&dev->dev)) {
|
||||
rc = -ENODEV;
|
||||
if (dev->rfkill && rfkill_blocked(dev->rfkill)) {
|
||||
rc = -ERFKILL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
@ -1125,11 +1125,7 @@ int nfc_register_device(struct nfc_dev *dev)
|
||||
if (rc)
|
||||
pr_err("Could not register llcp device\n");
|
||||
|
||||
rc = nfc_genl_device_added(dev);
|
||||
if (rc)
|
||||
pr_debug("The userspace won't be notified that the device %s was added\n",
|
||||
dev_name(&dev->dev));
|
||||
|
||||
device_lock(&dev->dev);
|
||||
dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev,
|
||||
RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev);
|
||||
if (dev->rfkill) {
|
||||
@ -1138,6 +1134,12 @@ int nfc_register_device(struct nfc_dev *dev)
|
||||
dev->rfkill = NULL;
|
||||
}
|
||||
}
|
||||
device_unlock(&dev->dev);
|
||||
|
||||
rc = nfc_genl_device_added(dev);
|
||||
if (rc)
|
||||
pr_debug("The userspace won't be notified that the device %s was added\n",
|
||||
dev_name(&dev->dev));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1154,10 +1156,17 @@ void nfc_unregister_device(struct nfc_dev *dev)
|
||||
|
||||
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
|
||||
|
||||
rc = nfc_genl_device_removed(dev);
|
||||
if (rc)
|
||||
pr_debug("The userspace won't be notified that the device %s "
|
||||
"was removed\n", dev_name(&dev->dev));
|
||||
|
||||
device_lock(&dev->dev);
|
||||
if (dev->rfkill) {
|
||||
rfkill_unregister(dev->rfkill);
|
||||
rfkill_destroy(dev->rfkill);
|
||||
}
|
||||
device_unlock(&dev->dev);
|
||||
|
||||
if (dev->ops->check_presence) {
|
||||
device_lock(&dev->dev);
|
||||
@ -1167,11 +1176,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
|
||||
cancel_work_sync(&dev->check_pres_work);
|
||||
}
|
||||
|
||||
rc = nfc_genl_device_removed(dev);
|
||||
if (rc)
|
||||
pr_debug("The userspace won't be notified that the device %s "
|
||||
"was removed\n", dev_name(&dev->dev));
|
||||
|
||||
nfc_llcp_unregister_device(dev);
|
||||
|
||||
mutex_lock(&nfc_devlist_mutex);
|
||||
|
@ -144,12 +144,15 @@ inline int nci_request(struct nci_dev *ndev,
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (!test_bit(NCI_UP, &ndev->flags))
|
||||
return -ENETDOWN;
|
||||
|
||||
/* Serialize all requests */
|
||||
mutex_lock(&ndev->req_lock);
|
||||
rc = __nci_request(ndev, req, opt, timeout);
|
||||
/* check the state after obtaing the lock against any races
|
||||
* from nci_close_device when the device gets removed.
|
||||
*/
|
||||
if (test_bit(NCI_UP, &ndev->flags))
|
||||
rc = __nci_request(ndev, req, opt, timeout);
|
||||
else
|
||||
rc = -ENETDOWN;
|
||||
mutex_unlock(&ndev->req_lock);
|
||||
|
||||
return rc;
|
||||
@ -473,6 +476,11 @@ static int nci_open_device(struct nci_dev *ndev)
|
||||
|
||||
mutex_lock(&ndev->req_lock);
|
||||
|
||||
if (test_bit(NCI_UNREG, &ndev->flags)) {
|
||||
rc = -ENODEV;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (test_bit(NCI_UP, &ndev->flags)) {
|
||||
rc = -EALREADY;
|
||||
goto done;
|
||||
@ -545,6 +553,10 @@ static int nci_open_device(struct nci_dev *ndev)
|
||||
static int nci_close_device(struct nci_dev *ndev)
|
||||
{
|
||||
nci_req_cancel(ndev, ENODEV);
|
||||
|
||||
/* This mutex needs to be held as a barrier for
|
||||
* caller nci_unregister_device
|
||||
*/
|
||||
mutex_lock(&ndev->req_lock);
|
||||
|
||||
if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
|
||||
@ -582,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev)
|
||||
|
||||
del_timer_sync(&ndev->cmd_timer);
|
||||
|
||||
/* Clear flags */
|
||||
ndev->flags = 0;
|
||||
/* Clear flags except NCI_UNREG */
|
||||
ndev->flags &= BIT(NCI_UNREG);
|
||||
|
||||
mutex_unlock(&ndev->req_lock);
|
||||
|
||||
@ -1266,6 +1278,12 @@ void nci_unregister_device(struct nci_dev *ndev)
|
||||
{
|
||||
struct nci_conn_info *conn_info, *n;
|
||||
|
||||
/* This set_bit is not protected with specialized barrier,
|
||||
* However, it is fine because the mutex_lock(&ndev->req_lock);
|
||||
* in nci_close_device() will help to emit one.
|
||||
*/
|
||||
set_bit(NCI_UNREG, &ndev->flags);
|
||||
|
||||
nci_close_device(ndev);
|
||||
|
||||
destroy_workqueue(ndev->cmd_wq);
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/if_arp.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/dst.h>
|
||||
#include <net/pkt_sched.h>
|
||||
#include <net/pkt_cls.h>
|
||||
#include <linux/tc_act/tc_mirred.h>
|
||||
@ -228,6 +229,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
bool want_ingress;
|
||||
bool is_redirect;
|
||||
bool expects_nh;
|
||||
bool at_ingress;
|
||||
int m_eaction;
|
||||
int mac_len;
|
||||
bool at_nh;
|
||||
@ -263,7 +265,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
* ingress - that covers the TC S/W datapath.
|
||||
*/
|
||||
is_redirect = tcf_mirred_is_act_redirect(m_eaction);
|
||||
use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
|
||||
at_ingress = skb_at_tc_ingress(skb);
|
||||
use_reinsert = at_ingress && is_redirect &&
|
||||
tcf_mirred_can_reinsert(retval);
|
||||
if (!use_reinsert) {
|
||||
skb2 = skb_clone(skb, GFP_ATOMIC);
|
||||
@ -271,10 +274,12 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
|
||||
goto out;
|
||||
}
|
||||
|
||||
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
|
||||
|
||||
/* All mirred/redirected skbs should clear previous ct info */
|
||||
nf_reset_ct(skb2);
|
||||
|
||||
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
|
||||
if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
|
||||
skb_dst_drop(skb2);
|
||||
|
||||
expects_nh = want_ingress || !m_mac_header_xmit;
|
||||
at_nh = skb->data == skb_network_header(skb);
|
||||
|
@ -566,6 +566,10 @@ static void smc_stat_fallback(struct smc_sock *smc)
|
||||
|
||||
static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
|
||||
{
|
||||
wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
|
||||
wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
|
||||
unsigned long flags;
|
||||
|
||||
smc->use_fallback = true;
|
||||
smc->fallback_rsn = reason_code;
|
||||
smc_stat_fallback(smc);
|
||||
@ -575,6 +579,16 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
|
||||
smc->clcsock->file->private_data = smc->clcsock;
|
||||
smc->clcsock->wq.fasync_list =
|
||||
smc->sk.sk_socket->wq.fasync_list;
|
||||
|
||||
/* There may be some entries remaining in
|
||||
* smc socket->wq, which should be removed
|
||||
* to clcsocket->wq during the fallback.
|
||||
*/
|
||||
spin_lock_irqsave(&smc_wait->lock, flags);
|
||||
spin_lock(&clc_wait->lock);
|
||||
list_splice_init(&smc_wait->head, &clc_wait->head);
|
||||
spin_unlock(&clc_wait->lock);
|
||||
spin_unlock_irqrestore(&smc_wait->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -708,13 +708,14 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
|
||||
int i;
|
||||
|
||||
while (1) {
|
||||
again:
|
||||
link_id = ++lgr->next_link_id;
|
||||
if (!link_id) /* skip zero as link_id */
|
||||
link_id = ++lgr->next_link_id;
|
||||
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
|
||||
if (smc_link_usable(&lgr->lnk[i]) &&
|
||||
lgr->lnk[i].link_id == link_id)
|
||||
continue;
|
||||
goto again;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
|
||||
return -EEXIST;
|
||||
|
||||
/* Allocate a new AEAD */
|
||||
tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
|
||||
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
|
||||
if (unlikely(!tmp))
|
||||
return -ENOMEM;
|
||||
|
||||
@ -597,6 +597,10 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
|
||||
tmp->cloned = NULL;
|
||||
tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
|
||||
tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
|
||||
if (!tmp->key) {
|
||||
tipc_aead_free(&tmp->rcu);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
|
||||
atomic_set(&tmp->users, 0);
|
||||
atomic64_set(&tmp->seqno, 0);
|
||||
@ -1470,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
|
||||
return -EEXIST;
|
||||
|
||||
/* Allocate crypto */
|
||||
c = kzalloc(sizeof(*c), GFP_ATOMIC);
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1484,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
|
||||
}
|
||||
|
||||
/* Allocate statistic structure */
|
||||
c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
|
||||
c->stats = alloc_percpu(struct tipc_crypto_stats);
|
||||
if (!c->stats) {
|
||||
if (c->wq)
|
||||
destroy_workqueue(c->wq);
|
||||
@ -2457,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
|
||||
}
|
||||
|
||||
/* Lets duplicate it first */
|
||||
skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
|
||||
skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
|
||||
rcu_read_unlock();
|
||||
|
||||
/* Now, generate new key, initiate & distribute it */
|
||||
|
@ -1298,8 +1298,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
|
||||
return false;
|
||||
#ifdef CONFIG_TIPC_CRYPTO
|
||||
case MSG_CRYPTO:
|
||||
tipc_crypto_msg_rcv(l->net, skb);
|
||||
return true;
|
||||
if (TIPC_SKB_CB(skb)->decrypted) {
|
||||
tipc_crypto_msg_rcv(l->net, skb);
|
||||
return true;
|
||||
}
|
||||
fallthrough;
|
||||
#endif
|
||||
default:
|
||||
pr_warn("Dropping received illegal msg type\n");
|
||||
|
@@ -936,33 +936,37 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
 	[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
 };

-int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
-			      struct cfg80211_registered_device **rdev,
-			      struct wireless_dev **wdev)
+static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
+				     struct cfg80211_registered_device **rdev,
+				     struct wireless_dev **wdev,
+				     struct nlattr **attrbuf)
 {
 	int err;

 	if (!cb->args[0]) {
-		struct nlattr **attrbuf;
+		struct nlattr **attrbuf_free = NULL;

-		attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf),
-				  GFP_KERNEL);
-		if (!attrbuf)
-			return -ENOMEM;
+		if (!attrbuf) {
+			attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf),
+					  GFP_KERNEL);
+			if (!attrbuf)
+				return -ENOMEM;
+			attrbuf_free = attrbuf;
+		}

 		err = nlmsg_parse_deprecated(cb->nlh,
 					     GENL_HDRLEN + nl80211_fam.hdrsize,
 					     attrbuf, nl80211_fam.maxattr,
 					     nl80211_policy, NULL);
 		if (err) {
-			kfree(attrbuf);
+			kfree(attrbuf_free);
 			return err;
 		}

 		rtnl_lock();
 		*wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(cb->skb->sk),
 						   attrbuf);
-		kfree(attrbuf);
+		kfree(attrbuf_free);
 		if (IS_ERR(*wdev)) {
 			rtnl_unlock();
 			return PTR_ERR(*wdev);
@@ -6197,7 +6201,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
 	int sta_idx = cb->args[2];
 	int err;

-	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
 	if (err)
 		return err;
 	/* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -7092,7 +7096,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
 	int path_idx = cb->args[2];
 	int err;

-	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
 	if (err)
 		return err;
 	/* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -7292,7 +7296,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
 	int path_idx = cb->args[2];
 	int err;

-	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
 	if (err)
 		return err;
 	/* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -9718,7 +9722,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
 	int start = cb->args[2], idx = 0;
 	int err;

-	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+	err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
 	if (err)
 		return err;
 	/* nl80211_prepare_wdev_dump acquired it in the successful case */
@@ -9851,7 +9855,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
 	if (!attrbuf)
 		return -ENOMEM;

-	res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
+	res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf);
 	if (res) {
 		kfree(attrbuf);
 		return res;
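The reworked helper follows a "caller buffer or local allocation" ownership pattern: attrbuf_free aliases the buffer only when it was allocated locally, so the kfree() calls become no-ops for a caller-supplied attrbuf (as in nl80211_dump_survey, which still needs the parsed attributes after the call). A stand-alone sketch of the same pattern, with hypothetical names:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* If the caller passes a buffer, use it and free nothing; otherwise
 * allocate locally and remember the allocation in a *_free alias so
 * free() only ever sees locally-owned memory. */
static int parse_with_optional_buf(int *buf, size_t n)	/* buf may be NULL */
{
	int *buf_free = NULL;

	if (!buf) {
		buf = calloc(n, sizeof(*buf));
		if (!buf)
			return -1;	/* -ENOMEM in the kernel code */
		buf_free = buf;
	}

	memset(buf, 0, n * sizeof(*buf));	/* stands in for the parse step */

	free(buf_free);		/* no-op when the caller owned buf */
	return 0;
}

int main(void)
{
	int caller_buf[4];

	assert(parse_with_optional_buf(caller_buf, 4) == 0);	/* caller owns */
	assert(parse_with_optional_buf(NULL, 4) == 0);		/* helper owns */
	return 0;
}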
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Portions of this file
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018, 2020-2021 Intel Corporation
  */
 #ifndef __NET_WIRELESS_NL80211_H
 #define __NET_WIRELESS_NL80211_H
@@ -22,10 +22,6 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
 	       ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
 }

-int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
-			      struct cfg80211_registered_device **rdev,
-			      struct wireless_dev **wdev);
-
 int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 			  struct genl_info *info,
 			  struct cfg80211_chan_def *chandef);
@@ -1046,6 +1046,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,

 		switch (otype) {
 		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_P2P_GO:
 			cfg80211_stop_ap(rdev, dev, true);
 			break;
 		case NL80211_IFTYPE_ADHOC:
@@ -500,7 +500,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 		pool->free_list_cnt--;
 		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
 					free_list_node);
-		list_del(&xskb->free_list_node);
+		list_del_init(&xskb->free_list_node);
 	}

 	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
@@ -568,7 +568,7 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
 	i = nb_entries;
 	while (i--) {
 		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
-		list_del(&xskb->free_list_node);
+		list_del_init(&xskb->free_list_node);

 		*xdp = &xskb->xdp;
 		xdp++;
@@ -615,6 +615,9 @@ EXPORT_SYMBOL(xp_can_alloc);

 void xp_free(struct xdp_buff_xsk *xskb)
 {
+	if (!list_empty(&xskb->free_list_node))
+		return;
+
 	xskb->pool->free_list_cnt++;
 	list_add(&xskb->free_list_node, &xskb->pool->free_list);
 }
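The double-free guard above works because list_del_init(), unlike list_del(), re-points the removed node at itself, so list_empty() on the node answers "is this buffer already on the free list?". A user-space sketch of the same idiom with a hand-rolled list (illustrative only, not the kernel list implementation):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* node now reports "not on a list" */
}

int main(void)
{
	struct list_head free_list, node;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&node);

	list_add(&node, &free_list);		/* first xp_free() */
	if (!list_empty(&node))
		printf("second free skipped\n");	/* the double-free guard */

	list_del_init(&node);			/* xp_alloc() takes it back */
	printf("after alloc, on free list: %d\n", !list_empty(&node));
	return 0;
}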
@@ -9,8 +9,6 @@
  * Include file for sample Host Bandwidth Manager (HBM) BPF programs
  */
 #define KBUILD_MODNAME "foo"
-#include <stddef.h>
-#include <stdbool.h>
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
@@ -309,7 +309,6 @@ int main(int argc, char **argv)
 	const char *mprog_filename = NULL, *mprog_name = NULL;
 	struct xdp_redirect_cpu *skel;
 	struct bpf_map_info info = {};
-	char ifname_buf[IF_NAMESIZE];
 	struct bpf_cpumap_val value;
 	__u32 infosz = sizeof(info);
 	int ret = EXIT_FAIL_OPTION;
@@ -390,10 +389,10 @@ int main(int argc, char **argv)
 		case 'd':
 			if (strlen(optarg) >= IF_NAMESIZE) {
 				fprintf(stderr, "-d/--dev name too long\n");
 				usage(argv, long_options, __doc__, mask, true, skel->obj);
 				goto end_cpu;
 			}
-			safe_strncpy(ifname_buf, optarg, strlen(ifname_buf));
-			ifindex = if_nametoindex(ifname_buf);
+			ifindex = if_nametoindex(optarg);
 			if (!ifindex)
 				ifindex = strtoul(optarg, NULL, 0);
 			if (!ifindex) {
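The removed lines bounded the copy by strlen() of the uninitialized destination buffer; passing optarg straight to if_nametoindex() sidesteps the copy entirely. If a bounded copy were still wanted, the size argument must describe the destination's capacity, e.g. (illustrative sketch only):

#include <stdio.h>
#include <string.h>
#include <net/if.h>

int main(int argc, char **argv)
{
	char ifname_buf[IF_NAMESIZE];
	const char *name = argc > 1 ? argv[1] : "lo";

	if (strlen(name) >= IF_NAMESIZE) {
		fprintf(stderr, "-d/--dev name too long\n");
		return 1;
	}
	/* bound by destination capacity, never by strlen() of the dest */
	snprintf(ifname_buf, sizeof(ifname_buf), "%s", name);
	printf("ifindex(%s) = %u\n", ifname_buf, if_nametoindex(ifname_buf));
	return 0;
}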
@@ -120,7 +120,10 @@ struct sample_output {
 		__u64 xmit;
 	} totals;
 	struct {
-		__u64 pps;
+		union {
+			__u64 pps;
+			__u64 num;
+		};
 		__u64 drop;
 		__u64 err;
 	} rx_cnt;
@@ -1322,7 +1325,7 @@ int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,

 static void sample_summary_print(void)
 {
-	double period = sample_out.rx_cnt.pps;
+	double num = sample_out.rx_cnt.num;

 	if (sample_out.totals.rx) {
 		double pkts = sample_out.totals.rx;
@@ -1330,7 +1333,7 @@ static void sample_summary_print(void)
 		print_always("  Packets received    : %'-10llu\n",
 			     sample_out.totals.rx);
 		print_always("  Average packets/s   : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 	if (sample_out.totals.redir) {
 		double pkts = sample_out.totals.redir;
@@ -1338,7 +1341,7 @@ static void sample_summary_print(void)
 		print_always("  Packets redirected  : %'-10llu\n",
 			     sample_out.totals.redir);
 		print_always("  Average redir/s     : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 	if (sample_out.totals.drop)
 		print_always("  Rx dropped          : %'-10llu\n",
@@ -1355,7 +1358,7 @@ static void sample_summary_print(void)
 		print_always("  Packets transmitted : %'-10llu\n",
 			     sample_out.totals.xmit);
 		print_always("  Average transmit/s  : %'-10.0f\n",
-			     sample_round(pkts / period));
+			     sample_round(pkts / num));
 	}
 }
@@ -1422,7 +1425,7 @@ static int sample_stats_collect(struct stats_record *rec)
 	return 0;
 }

-static void sample_summary_update(struct sample_output *out, int interval)
+static void sample_summary_update(struct sample_output *out)
 {
 	sample_out.totals.rx += out->totals.rx;
 	sample_out.totals.redir += out->totals.redir;
@@ -1430,12 +1433,11 @@ static void sample_summary_update(struct sample_output *out)
 	sample_out.totals.drop_xmit += out->totals.drop_xmit;
 	sample_out.totals.err += out->totals.err;
 	sample_out.totals.xmit += out->totals.xmit;
-	sample_out.rx_cnt.pps += interval;
+	sample_out.rx_cnt.num++;
 }

 static void sample_stats_print(int mask, struct stats_record *cur,
-			       struct stats_record *prev, char *prog_name,
-			       int interval)
+			       struct stats_record *prev, char *prog_name)
 {
 	struct sample_output out = {};

@@ -1452,7 +1454,7 @@ static void sample_stats_print(int mask, struct stats_record *cur,
 	else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
 		stats_get_devmap_xmit_multi(cur, prev, 0, &out,
 					    mask & SAMPLE_DEVMAP_XMIT_CNT);
-	sample_summary_update(&out, interval);
+	sample_summary_update(&out);

 	stats_print(prog_name, mask, cur, prev, &out);
 }
@@ -1495,7 +1497,7 @@ static void swap(struct stats_record **a, struct stats_record **b)
 }

 static int sample_timer_cb(int timerfd, struct stats_record **rec,
-			   struct stats_record **prev, int interval)
+			   struct stats_record **prev)
 {
 	char line[64] = "Summary";
 	int ret;
@@ -1524,7 +1526,7 @@ static int sample_timer_cb(int timerfd, struct stats_record **rec,
 		snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
 	}

-	sample_stats_print(sample_mask, *rec, *prev, line, interval);
+	sample_stats_print(sample_mask, *rec, *prev, line);
 	return 0;
 }
@@ -1579,7 +1581,7 @@ int sample_run(int interval, void (*post_cb)(void *), void *ctx)
 		if (pfd[0].revents & POLLIN)
 			ret = sample_signal_cb();
 		else if (pfd[1].revents & POLLIN)
-			ret = sample_timer_cb(timerfd, &rec, &prev, interval);
+			ret = sample_timer_cb(timerfd, &rec, &prev);

 		if (ret)
 			break;
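The anonymous union lets the summary path reuse rx_cnt's first slot as a plain count of collected samples (num) rather than a rate (pps), so the final averages become total divided by number of samples instead of being scaled by the polling interval. A small sketch of that aliasing (hypothetical types, not the sample's code):

#include <stdio.h>

struct rx_cnt {
	union {
		unsigned long long pps;	/* rate, used for periodic output */
		unsigned long long num;	/* sample count, used for the summary */
	};
	unsigned long long drop;
	unsigned long long err;
};

int main(void)
{
	struct rx_cnt c = { .num = 0 };

	for (int i = 0; i < 5; i++)
		c.num++;	/* one increment per collected stats sample */

	/* both names read the same 8 bytes */
	printf("samples=%llu (aliases pps=%llu)\n", c.num, c.pps);
	return 0;
}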
@@ -88,5 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)

 $(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
 	$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
-		    LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \
-		    LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD)
+		    CC=$(HOSTCC) LD=$(HOSTLD)
@@ -45,8 +45,8 @@ struct bpf_gen {
 	int nr_fd_array;
 };

-void bpf_gen__init(struct bpf_gen *gen, int log_level);
-int bpf_gen__finish(struct bpf_gen *gen);
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
 void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
@@ -18,7 +18,7 @@
 #define MAX_USED_MAPS 64
 #define MAX_USED_PROGS 32
 #define MAX_KFUNC_DESCS 256
-#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

 /* The following structure describes the stack layout of the loader program.
  * In addition R6 contains the pointer to context.
@@ -33,8 +33,8 @@
  */
 struct loader_stack {
 	__u32 btf_fd;
-	__u32 prog_fd[MAX_USED_PROGS];
 	__u32 inner_map_fd;
+	__u32 prog_fd[MAX_USED_PROGS];
 };

 #define stack_off(field) \
@@ -42,6 +42,11 @@ struct loader_stack {

 #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+	return gen->fd_array + index * sizeof(int);
+}
+
 static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
 {
 	size_t off = gen->insn_cur - gen->insn_start;
@@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
 	emit(gen, insn2);
 }

-void bpf_gen__init(struct bpf_gen *gen, int log_level)
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
 {
-	size_t stack_sz = sizeof(struct loader_stack);
+	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
 	int i;

+	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	gen->log_level = log_level;
 	/* save ctx pointer into R6 */
 	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
@@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
 	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

+	/* amount of stack actually used, only used to calculate iterations, not stack offset */
+	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
 	/* jump over cleanup code */
 	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
-			      /* size of cleanup code below */
-			      (stack_sz / 4) * 3 + 2));
+			      /* size of cleanup code below (including map fd cleanup) */
+			      (nr_progs_sz / 4) * 3 + 2 +
+			      /* 6 insns for emit_sys_close_blob,
+			       * 6 insns for debug_regs in emit_sys_close_blob
+			       */
+			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

 	/* remember the label where all error branches will jump to */
 	gen->cleanup_label = gen->insn_cur - gen->insn_start;
 	/* emit cleanup code: close all temp FDs */
-	for (i = 0; i < stack_sz; i += 4) {
+	for (i = 0; i < nr_progs_sz; i += 4) {
 		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
 		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
 		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
 	}
+	for (i = 0; i < nr_maps; i++)
+		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
 	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
 	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
 	emit(gen, BPF_EXIT_INSN());
@@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
  */
 static int add_map_fd(struct bpf_gen *gen)
 {
-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_maps == MAX_USED_MAPS) {
 		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
 		gen->error = -E2BIG;
@@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 {
 	int cur;

-	if (!gen->fd_array)
-		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
 	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
 		cur = add_data(gen, NULL, sizeof(int));
 		return (cur - gen->fd_array) / sizeof(int);
@@ -183,11 +196,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen)
 	return MAX_USED_MAPS + gen->nr_fd_array++;
 }

-static int blob_fd_array_off(struct bpf_gen *gen, int index)
-{
-	return gen->fd_array + index * sizeof(int);
-}
-
 static int insn_bytes_to_bpf_size(__u32 sz)
 {
 	switch (sz) {
@@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
 	__emit_sys_close(gen);
 }

-int bpf_gen__finish(struct bpf_gen *gen)
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
 {
 	int i;

+	if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) {
+		pr_warn("progs/maps mismatch\n");
+		gen->error = -EFAULT;
+		return gen->error;
+	}
 	emit_sys_close_stack(gen, stack_off(btf_fd));
 	for (i = 0; i < gen->nr_progs; i++)
 		move_stack2ctx(gen,
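Two details make the new cleanup sizing work: prog_fd[] is moved to the end of struct loader_stack, and offsetof(struct loader_stack, prog_fd[nr_progs]) then measures exactly the bytes in use for nr_progs programs, so the generated cleanup loop (and the jump that skips it) no longer covers the full worst-case stack. A compile-and-run sketch of that offsetof arithmetic (a constant index is used here for strict C conformance; the kernel macro accepts a runtime value):

#include <stdio.h>
#include <stddef.h>

#define MAX_USED_PROGS 32
enum { NR_PROGS = 3 };		/* hypothetical number of loaded programs */

struct loader_stack {
	unsigned int btf_fd;
	unsigned int inner_map_fd;
	unsigned int prog_fd[MAX_USED_PROGS];	/* must stay last for this trick */
};

int main(void)
{
	size_t used = offsetof(struct loader_stack, prog_fd[NR_PROGS]);

	/* 2 fixed 4-byte slots + 3 prog slots * 4 bytes = 20 bytes */
	printf("bytes to scan in cleanup: %zu\n", used);
	return 0;
}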
@@ -7258,7 +7258,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 	}

 	if (obj->gen_loader)
-		bpf_gen__init(obj->gen_loader, attr->log_level);
+		bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps);

 	err = bpf_object__probe_loading(obj);
 	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 		for (i = 0; i < obj->nr_maps; i++)
 			obj->maps[i].fd = -1;
 		if (!err)
-			err = bpf_gen__finish(obj->gen_loader);
+			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}

 	/* clean up fd_array */
@@ -187,7 +187,7 @@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
 $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
 	$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
 		    OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \
-		    BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \
+		    BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
 		    BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \
 		    BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
 		    cp $(RUNQSLOWER_OUTPUT)runqslower $@
tools/testing/selftests/bpf/prog_tests/helper_restricted.c (new file, 33 lines)

// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "test_helper_restricted.skel.h"

void test_helper_restricted(void)
{
	int prog_i = 0, prog_cnt;
	int duration = 0;

	do {
		struct test_helper_restricted *test;
		int maybeOK;

		test = test_helper_restricted__open();
		if (!ASSERT_OK_PTR(test, "open"))
			return;

		prog_cnt = test->skeleton->prog_cnt;

		for (int j = 0; j < prog_cnt; ++j) {
			struct bpf_program *prog = *test->skeleton->progs[j].prog;

			maybeOK = bpf_program__set_autoload(prog, prog_i == j);
			ASSERT_OK(maybeOK, "set autoload");
		}

		maybeOK = test_helper_restricted__load(test);
		CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");

		test_helper_restricted__destroy(test);
	} while (++prog_i < prog_cnt);
}
tools/testing/selftests/bpf/progs/test_helper_restricted.c (new file, 123 lines)

// SPDX-License-Identifier: GPL-2.0-only
#include <time.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct timer {
	struct bpf_timer t;
};

struct lock {
	struct bpf_spin_lock l;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct timer);
} timers SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct lock);
} locks SEC(".maps");

static int timer_cb(void *map, int *key, struct timer *timer)
{
	return 0;
}

static void timer_work(void)
{
	struct timer *timer;
	const int key = 0;

	timer = bpf_map_lookup_elem(&timers, &key);
	if (timer) {
		bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC);
		bpf_timer_set_callback(&timer->t, timer_cb);
		bpf_timer_start(&timer->t, 10E9, 0);
		bpf_timer_cancel(&timer->t);
	}
}

static void spin_lock_work(void)
{
	const int key = 0;
	struct lock *lock;

	lock = bpf_map_lookup_elem(&locks, &key);
	if (lock) {
		bpf_spin_lock(&lock->l);
		bpf_spin_unlock(&lock->l);
	}
}

SEC("raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("perf_event")
int perf_event_timer(void *ctx)
{
	timer_work();

	return 0;
}

SEC("raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

SEC("perf_event")
int perf_event_spin_lock(void *ctx)
{
	spin_lock_work();

	return 0;
}

const char LICENSE[] SEC("license") = "GPL";
@@ -92,6 +92,7 @@ struct bpf_test {
 	int fixup_map_event_output[MAX_FIXUPS];
 	int fixup_map_reuseport_array[MAX_FIXUPS];
 	int fixup_map_ringbuf[MAX_FIXUPS];
+	int fixup_map_timer[MAX_FIXUPS];
 	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
 	 * Can be a tab-separated sequence of expected strings. An empty string
 	 * means no log verification.
@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu)
  *	int cnt;
  *	struct bpf_spin_lock l;
  * };
+ * struct bpf_timer {
+ *	__u64 :64;
+ *	__u64 :64;
+ * } __attribute__((aligned(8)));
+ * struct timer {
+ *	struct bpf_timer t;
+ * };
  */
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
 static __u32 btf_raw_types[] = {
 	/* int */
 	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = {
 	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
 	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
 	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+	/* struct bpf_timer */ /* [4] */
+	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
+	/* struct timer */ /* [5] */
+	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
+	BPF_MEMBER_ENC_PLACEHOLDER
 };

 static int load_btf(void)
@@ -696,6 +709,29 @@ static int create_sk_storage_map(void)
 	return fd;
 }

+static int create_map_timer(void)
+{
+	struct bpf_create_map_attr attr = {
+		.name = "test_map",
+		.map_type = BPF_MAP_TYPE_ARRAY,
+		.key_size = 4,
+		.value_size = 16,
+		.max_entries = 1,
+		.btf_key_type_id = 1,
+		.btf_value_type_id = 5,
+	};
+	int fd, btf_fd;
+
+	btf_fd = load_btf();
+	if (btf_fd < 0)
+		return -1;
+	attr.btf_fd = btf_fd;
+	fd = bpf_create_map_xattr(&attr);
+	if (fd < 0)
+		printf("Failed to create map with timer\n");
+	return fd;
+}
+
 static char bpf_vlog[UINT_MAX >> 8];

 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	int *fixup_map_event_output = test->fixup_map_event_output;
 	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
 	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
+	int *fixup_map_timer = test->fixup_map_timer;

 	if (test->fill_helper) {
 		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 			fixup_map_ringbuf++;
 		} while (*fixup_map_ringbuf);
 	}
+	if (*fixup_map_timer) {
+		map_fds[21] = create_map_timer();
+		do {
+			prog[*fixup_map_timer].imm = map_fds[21];
+			fixup_map_timer++;
+		} while (*fixup_map_timer);
+	}
 }

 struct libcap {
tools/testing/selftests/bpf/verifier/helper_restricted.c (new file, 196 lines)

{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "unknown func bpf_ktime_get_coarse_ns",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
	"bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_spin_lock),
	BPF_EXIT_INSN(),
	},
	.fixup_map_spin_lock = { 3 },
	.errstr = "tracing progs cannot use bpf_spin_lock yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
@@ -18,6 +18,40 @@
 	.fixup_map_in_map = { 3 },
 	.result = ACCEPT,
 },
+{
+	"map in map state pruning",
+	.insns = {
+	BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_in_map = { 4, 14 },
+	.flags = BPF_F_TEST_STATE_FREQ,
+	.result = VERBOSE_ACCEPT,
+	.errstr = "processed 25 insns",
+	.prog_type = BPF_PROG_TYPE_XDP,
+},
 {
 	"invalid inner map pointer",
 	.insns = {
@@ -6,6 +6,7 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_VRF=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_CGROUP_BPF=y
+CONFIG_NET_ACT_CT=m
 CONFIG_NET_ACT_MIRRED=m
 CONFIG_NET_ACT_MPLS=m
 CONFIG_NET_ACT_VLAN=m
Some files were not shown because too many files have changed in this diff.