Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge tag 'net-6.6-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from CAN and BPF.

  We have a regression in TC currently under investigation, otherwise
  the things that stand off most are probably the TCP and AF_PACKET
  fixes, with both issues coming from 6.5.

  Previous releases - regressions:

   - af_packet: fix fortified memcpy() without flex array.
   - tcp: fix crashes trying to free half-baked MTU probes
   - xdp: fix zero-size allocation warning in xskq_create()
   - can: sja1000: always restart the tx queue after an overrun
   - eth: mlx5e: again mutually exclude RX-FCS and RX-port-timestamp
   - eth: nfp: avoid rmmod nfp crash issues
   - eth: octeontx2-pf: fix page pool frag allocation warning

  Previous releases - always broken:

   - mctp: perform route lookups under a RCU read-side lock
   - bpf: s390: fix clobbering the caller's backchain in the trampoline
   - phy: lynx-28g: cancel the CDR check work item on the remove path
   - dsa: qca8k: fix qca8k driver for Turris 1.x
   - eth: ravb: fix use-after-free issue in ravb_tx_timeout_work()
   - eth: ixgbe: fix crash with empty VF macvlan list"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-6.6-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (54 commits)
  rswitch: Fix imbalance phy_power_off() calling
  rswitch: Fix renesas_eth_sw_remove() implementation
  octeontx2-pf: Fix page pool frag allocation warning
  nfc: nci: assert requested protocol is valid
  af_packet: Fix fortified memcpy() without flex array.
  net: tcp: fix crashes trying to free half-baked MTU probes
  net/smc: Fix pos miscalculation in statistics
  nfp: flower: avoid rmmod nfp crash issues
  net: usb: dm9601: fix uninitialized variable use in dm9601_mdio_read
  ethtool: Fix mod state of verbose no_mask bitset
  net: nfc: fix races in nfc_llcp_sock_get() and nfc_llcp_sock_get_sn()
  mctp: perform route lookups under a RCU read-side lock
  net: skbuff: fix kernel-doc typos
  s390/bpf: Fix unwinding past the trampoline
  s390/bpf: Fix clobbering the caller's backchain in the trampoline
  net/mlx5e: Again mutually exclude RX-FCS and RX-port-timestamp
  net/smc: Fix dependency of SMC on ISM
  ixgbe: fix crash with empty VF macvlan list
  net/mlx5e: macsec: use update_pn flag instead of PN comparation
  net: phy: mscc: macsec: reject PN update requests
  ...
commit e8c127b057
@@ -185,7 +185,7 @@ aips1: bus@44000000 {
             #size-cells = <1>;
             ranges;

-            anomix_ns_gpr: syscon@44210000 {
+            aonmix_ns_gpr: syscon@44210000 {
                 compatible = "fsl,imx93-aonmix-ns-syscfg", "syscon";
                 reg = <0x44210000 0x1000>;
             };
@@ -319,6 +319,7 @@ flexcan1: can@443a0000 {
                 assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
                 assigned-clock-rates = <40000000>;
                 fsl,clk-source = /bits/ 8 <0>;
+                fsl,stop-mode = <&aonmix_ns_gpr 0x14 0>;
                 status = "disabled";
             };

@@ -591,6 +592,7 @@ flexcan2: can@425b0000 {
                 assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
                 assigned-clock-rates = <40000000>;
                 fsl,clk-source = /bits/ 8 <0>;
+                fsl,stop-mode = <&wakeupmix_gpr 0x0c 2>;
                 status = "disabled";
             };
@@ -245,7 +245,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
     emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
     /* Set return value. */
     if (!is_tail_call)
-        emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+        emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
     emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
           is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
           ctx);
@@ -759,8 +759,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
     if (ret)
         return ret;

-    if (save_ret)
-        emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+    if (save_ret) {
+        emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+        emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
+    }

     /* update branch with beqz */
     if (ctx->insns) {
@@ -853,7 +855,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,

     save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
     if (save_ret) {
-        stack_size += 8;
+        stack_size += 16; /* Save both A5 (BPF R0) and A0 */
         retval_off = stack_size;
     }

@@ -957,6 +959,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
         if (ret)
             goto out;
         emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+        emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
         im->ip_after_call = ctx->insns + ctx->ninsns;
         /* 2 nops reserved for auipc+jalr pair */
         emit(rv_nop(), ctx);
@@ -988,8 +991,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
     if (flags & BPF_TRAMP_F_RESTORE_REGS)
         restore_args(nregs, args_off, ctx);

-    if (save_ret)
+    if (save_ret) {
         emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+        emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
+    }

     emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);

@@ -1515,7 +1520,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
         if (ret)
             return ret;

-        emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+        if (insn->src_reg != BPF_PSEUDO_CALL)
+            emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
         break;
     }
     /* tail call */
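For context on the 16-byte return-value area carved out above: in the RISC-V BPF JIT, BPF R0 is mapped to register a5 inside JITed code, while the C calling convention returns values in a0, so a trampoline that both calls the traced function and runs BPF programs must preserve the two separately. A sketch of that assumption (the struct and its names are ours, not the kernel's):

    /* Illustrative layout only: the two 8-byte slots added by
     * "stack_size += 16" in the hunk above.
     */
    struct trampoline_retval_area {
        unsigned long a0; /* C ABI return value of the traced function */
        unsigned long a5; /* BPF R0 as seen by JITed BPF programs */
    };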
@@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
                  * func_addr's original caller
                  */
     int stack_size;        /* Trampoline stack size */
+    int backchain_off;     /* Offset of backchain */
     int stack_args_off;    /* Offset of stack arguments for calling
                             * func_addr, has to be at the top
                             */
@@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
                             * for __bpf_prog_enter() return value and
                             * func_addr respectively
                             */
-    int r14_off;           /* Offset of saved %r14 */
     int run_ctx_off;       /* Offset of struct bpf_tramp_run_ctx */
     int tccnt_off;         /* Offset of saved tailcall counter */
+    int r14_off;           /* Offset of saved %r14, has to be at the
+                            * bottom */
     int do_fexit;          /* do_fexit: label */
 };

@@ -2247,8 +2249,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
      * Calculate the stack layout.
      */

-    /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+    /*
+     * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
+     * ABI requires, put our backchain at the end of the allocated memory.
+     */
     tjit->stack_size = STACK_FRAME_OVERHEAD;
+    tjit->backchain_off = tjit->stack_size - sizeof(u64);
     tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
     tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
     tjit->ip_off = alloc_stack(tjit, sizeof(u64));
@@ -2256,16 +2262,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
     tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
     tjit->retval_off = alloc_stack(tjit, sizeof(u64));
     tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
-    tjit->r14_off = alloc_stack(tjit, sizeof(u64));
     tjit->run_ctx_off = alloc_stack(tjit,
                                     sizeof(struct bpf_tramp_run_ctx));
     tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
-    /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
-    tjit->stack_size -= STACK_FRAME_OVERHEAD;
+    tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
+    /*
+     * In accordance with the s390x ABI, the caller has allocated
+     * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
+     * backchain, and the rest we can use.
+     */
+    tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
     tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;

+    /* lgr %r1,%r15 */
+    EMIT4(0xb9040000, REG_1, REG_15);
     /* aghi %r15,-stack_size */
     EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+    /* stg %r1,backchain_off(%r15) */
+    EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+                  tjit->backchain_off);
     /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
     _EMIT6(0xd203f000 | tjit->tccnt_off,
            0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
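A rough model of the layout the comments above describe (our own sketch, assuming the standard 160-byte s390x stack frame): the first 8 bytes of every frame hold a backchain pointer to the caller's frame, and that chain is what the unwinder follows past the trampoline.

    /* Hypothetical illustration, not kernel code: an s390x stack frame as
     * the ABI sees it. STACK_FRAME_OVERHEAD is 160 bytes; the backchain
     * occupies the first 8 and links frames together for unwinding.
     */
    struct s390x_abi_frame {
        unsigned long backchain;    /* points at the caller's frame */
        unsigned long reserved[19]; /* rest of the 160-byte overhead area */
    };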
@@ -190,7 +190,7 @@ config CAN_SLCAN

 config CAN_SUN4I
     tristate "Allwinner A10 CAN controller"
-    depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
+    depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
     help
       Say Y here if you want to use CAN controller found on Allwinner
       A10/A20/D1 SoCs.
@@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
 static struct flexcan_devtype_data fsl_imx93_devtype_data = {
     .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
         FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
-        FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE |
+        FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
         FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
         FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
         FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
@@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
     } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
         regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
                            1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
-    } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) {
-        /* For the auto stop mode, software do nothing, hardware will cover
-         * all the operation automatically after system go into low power mode.
-         */
-        return 0;
     }

     return flexcan_low_power_enter_ack(priv);
@@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
     reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
     priv->write(reg_mcr, &regs->mcr);

-    /* For the auto stop mode, hardware will exist stop mode
-     * automatically after system go out of low power mode.
-     */
-    if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-        return 0;
-
     return flexcan_low_power_exit_ack(priv);
 }

@@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
         ret = flexcan_setup_stop_mode_scfw(pdev);
     else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
         ret = flexcan_setup_stop_mode_gpr(pdev);
-    else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-        ret = 0;
     else
         /* return 0 directly if doesn't support stop mode feature */
         return 0;

-    if (ret)
+    /* If ret is -EINVAL, this means SoC claim to support stop mode, but
+     * dts file lack the stop mode property definition. For this case,
+     * directly return 0, this will skip the wakeup capable setting and
+     * will not block the driver probe.
+     */
+    if (ret == -EINVAL)
+        return 0;
+    else if (ret)
         return ret;

     device_set_wakeup_capable(&pdev->dev, true);
@@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
     if (netif_running(dev)) {
         int err;

-        if (device_may_wakeup(device)) {
+        if (device_may_wakeup(device))
             flexcan_enable_wakeup_irq(priv, true);
-            /* For auto stop mode, need to keep the clock on before
-             * system go into low power mode. After system go into
-             * low power mode, hardware will config the flexcan into
-             * stop mode, and gate off the clock automatically.
-             */
-            if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-                return 0;
-        }

         err = pm_runtime_force_suspend(device);
         if (err)
@@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
     if (netif_running(dev)) {
         int err;

-        /* For the wakeup in auto stop mode, no need to gate on the
-         * clock here, hardware will do this automatically.
-         */
-        if (!(device_may_wakeup(device) &&
-              priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) {
-            err = pm_runtime_force_resume(device);
-            if (err)
-                return err;
-        }
+        err = pm_runtime_force_resume(device);
+        if (err)
+            return err;

         if (device_may_wakeup(device))
             flexcan_enable_wakeup_irq(priv, false);
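The i.MX93 entries above switch from the removed auto stop mode to the GPR-based one, in which the driver asserts a request bit in a system-controller (syscon) register through a regmap and then waits for the low-power acknowledgment. A minimal sketch of the request step, with hypothetical names:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    /* Hypothetical helper: assert a stop-mode request bit in a syscon GPR,
     * as flexcan_enter_stop_mode() does in the hunk above.
     */
    static int example_stop_mode_request(struct regmap *gpr, unsigned int reg,
                                         unsigned int bit)
    {
        /* set only the request bit, leaving the other bits untouched */
        return regmap_update_bits(gpr, reg, BIT(bit), BIT(bit));
    }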
@@ -68,8 +68,6 @@
 #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
 /* Device supports RX via FIFO */
 #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
-/* auto enter stop mode to support wakeup */
-#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17)

 struct flexcan_devtype_data {
     u32 quirks;        /* quirks needed for different IP cores */
@@ -125,7 +125,7 @@ static const struct tcan4x5x_version_info tcan4x5x_versions[] = {
     },
     [TCAN4553] = {
         .name = "4553",
-        .id2_register = 0x32353534,
+        .id2_register = 0x33353534,
     },
     /* generic version with no id2_register at the end */
     [TCAN4X5X] = {
@@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
     struct net_device *dev = (struct net_device *)dev_id;

     netdev_dbg(dev, "performing a soft reset upon overrun\n");
-    sja1000_start(dev);
+
+    netif_tx_lock(dev);
+
+    can_free_echo_skb(dev, 0, NULL);
+    sja1000_set_mode(dev, CAN_MODE_START);
+
+    netif_tx_unlock(dev);

     return IRQ_HANDLED;
 }
@@ -505,8 +505,8 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
                void *val_buf, size_t val_len)
 {
     int i, count = val_len / sizeof(u32), ret;
-    u32 reg = *(u32 *)reg_buf & U16_MAX;
     struct qca8k_priv *priv = ctx;
+    u32 reg = *(u16 *)reg_buf;

     if (priv->mgmt_master &&
         !qca8k_read_eth(priv, reg, val_buf, val_len))
@@ -527,8 +527,8 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
                         const void *val_buf, size_t val_len)
 {
     int i, count = val_len / sizeof(u32), ret;
-    u32 reg = *(u32 *)reg_buf & U16_MAX;
     struct qca8k_priv *priv = ctx;
+    u32 reg = *(u16 *)reg_buf;
     u32 *val = (u32 *)val_buf;

     if (priv->mgmt_master &&
@@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
         goto err_read_skb;
     }

+    /* It seems that accessing the switch's internal PHYs via management
+     * packets still uses the MDIO bus within the switch internally, and
+     * these accesses can conflict with external MDIO accesses to other
+     * devices on the MDIO bus.
+     * We therefore need to lock the MDIO bus onto which the switch is
+     * connected.
+     */
+    mutex_lock(&priv->bus->mdio_lock);
+
     /* Actually start the request:
      * 1. Send mdio master packet
      * 2. Busy Wait for mdio master command
@@ -678,6 +687,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
     mgmt_master = priv->mgmt_master;
     if (!mgmt_master) {
         mutex_unlock(&mgmt_eth_data->mutex);
+        mutex_unlock(&priv->bus->mdio_lock);
         ret = -EINVAL;
         goto err_mgmt_master;
     }
@@ -765,6 +775,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
                            QCA8K_ETHERNET_TIMEOUT);

     mutex_unlock(&mgmt_eth_data->mutex);
+    mutex_unlock(&priv->bus->mdio_lock);

     return ret;
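The core of the qca8k register-address fix is the `*(u32 *)reg_buf` to `*(u16 *)reg_buf` change: the regmap hands the bus callback a 16-bit register address, so dereferencing the buffer as a u32 reads past it and, on a big-endian machine such as the Turris 1.x PowerPC, picks up the wrong halfword even after masking with U16_MAX. A userspace-sized model of the corrected read:

    #include <stdint.h>

    /* Model of the fix: the buffer really holds a 16-bit register address,
     * so read exactly 16 bits instead of casting to a wider type and masking.
     */
    static uint32_t reg_from_buf(const void *reg_buf)
    {
        return *(const uint16_t *)reg_buf;
    }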
@@ -2093,3 +2093,35 @@ void ice_lag_rebuild(struct ice_pf *pf)
     }
     mutex_unlock(&pf->lag_mutex);
 }
+
+/**
+ * ice_lag_is_switchdev_running
+ * @pf: pointer to PF structure
+ *
+ * Check if switchdev is running on any of the interfaces connected to lag.
+ */
+bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+{
+    struct ice_lag *lag = pf->lag;
+    struct net_device *tmp_nd;
+
+    if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+        return false;
+
+    rcu_read_lock();
+    for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+        struct ice_netdev_priv *priv = netdev_priv(tmp_nd);
+
+        if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
+            !priv->vsi->back)
+            continue;
+
+        if (ice_is_switchdev_running(priv->vsi->back)) {
+            rcu_read_unlock();
+            return true;
+        }
+    }
+    rcu_read_unlock();
+
+    return false;
+}
@@ -62,4 +62,5 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
 int ice_init_lag(struct ice_pf *pf);
 void ice_deinit_lag(struct ice_pf *pf);
 void ice_lag_rebuild(struct ice_pf *pf);
+bool ice_lag_is_switchdev_running(struct ice_pf *pf);
 #endif /* _ICE_LAG_H_ */
@@ -3575,6 +3575,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi)

     dev = ice_pf_to_dev(vsi->back);

+    if (ice_lag_is_switchdev_running(vsi->back)) {
+        dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
+                vsi->vsi_num);
+        return 0;
+    }
+
     /* the VSI passed in is already the default VSI */
     if (ice_is_vsi_dflt_vsi(vsi)) {
         dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
     struct vf_macvlans *mv_list;
     int num_vf_macvlans, i;

+    /* Initialize list of VF macvlans */
+    INIT_LIST_HEAD(&adapter->vf_mvs.l);
+
     num_vf_macvlans = hw->mac.num_rar_entries -
         (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
     if (!num_vf_macvlans)
@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
     mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
                       GFP_KERNEL);
     if (mv_list) {
-        /* Initialize list of VF macvlans */
-        INIT_LIST_HEAD(&adapter->vf_mvs.l);
         for (i = 0; i < num_vf_macvlans; i++) {
             mv_list[i].vf = -1;
             mv_list[i].free = true;
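The ixgbe change is a classic init-ordering fix: the macvlan list head was only initialized on the allocation path, yet later code iterates it unconditionally, so an early return left an uninitialized list behind. The shape of the fix in miniature (our own sketch, not the driver's structs):

    #include <linux/list.h>

    struct example_adapter {
        struct list_head vf_mvs; /* may be iterated even when empty */
    };

    static void example_setup(struct example_adapter *ad, int want_entries)
    {
        /* initialize first, so iteration is safe on every exit path */
        INIT_LIST_HEAD(&ad->vf_mvs);

        if (!want_entries)
            return; /* the list is still valid: it is just empty */

        /* ... allocate entries and list_add() them here ... */
    }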
@@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)

     if (netif_running(secy->netdev)) {
         /* Keys cannot be changed after creation */
-        err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
-                                   sw_tx_sa->next_pn);
-        if (err)
-            return err;
+        if (ctx->sa.update_pn) {
+            err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+                                       sw_tx_sa->next_pn);
+            if (err)
+                return err;
+        }

         err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
                                       sa_num, sw_tx_sa->active);
@@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
         if (err)
             return err;

+        if (!ctx->sa.update_pn)
+            return 0;
+
         err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
                                        rx_sa->next_pn);
         if (err)
@@ -1403,6 +1403,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
         return 0;
     }

+    pp_params.order = get_order(buf_size);
     pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
     pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
     pp_params.nid = NUMA_NO_NODE;
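The octeontx2 warning came from asking a page pool for fragments larger than its backing page; setting `pp_params.order` from the receive-buffer size makes the pool allocate high-order pages instead. A userspace model of the sizing arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    /* Userspace model of the kernel's get_order() with 4 KiB pages. */
    static int order_for(unsigned long size)
    {
        int order = 0;

        while ((4096UL << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        /* a 2 KiB buffer fits an order-0 page; a 9 KiB jumbo buffer
         * needs an order-2 (16 KiB) page */
        printf("%d %d\n", order_for(2048), order_for(9216));
        return 0;
    }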
@@ -580,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
         goto out;
     }

-    if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+    if (ctx->sa.update_pn) {
         netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
                    assoc_num);
         err = -EINVAL;
@@ -973,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
         goto out;
     }

-    if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+    if (ctx->sa.update_pn) {
         netdev_err(ctx->netdev,
                    "MACsec offload update RX sa %d PN isn't supported\n",
                    assoc_num);
@@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
     struct mlx5e_channels *chs = &priv->channels;
     struct mlx5e_params new_params;
     int err;
+    bool rx_ts_over_crc = !enable;

     mutex_lock(&priv->state_lock);

     new_params = chs->params;
     new_params.scatter_fcs_en = enable;
     err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
-                                   &new_params.scatter_fcs_en, true);
+                                   &rx_ts_over_crc, true);
     mutex_unlock(&priv->state_lock);
     return err;
 }
|
||||
.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
|
||||
};
|
||||
|
||||
static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
|
||||
bool learning_en)
|
||||
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
|
||||
bool learning_en)
|
||||
{
|
||||
char tnpc_pl[MLXSW_REG_TNPC_LEN];
|
||||
|
||||
|
@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
     unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
     struct nfp_flower_cmsg_merge_hint *msg;
     struct nfp_fl_payload *sub_flows[2];
+    struct nfp_flower_priv *priv;
     int err, i, flow_cnt;

     msg = nfp_flower_cmsg_get_data(skb);
@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
         return;
     }

-    rtnl_lock();
+    priv = app->priv;
+    mutex_lock(&priv->nfp_fl_lock);
     for (i = 0; i < flow_cnt; i++) {
         u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);

         sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
         if (!sub_flows[i]) {
             nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
-            goto err_rtnl_unlock;
+            goto err_mutex_unlock;
         }
     }

@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
     if (err == -ENOMEM)
         nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");

-err_rtnl_unlock:
-    rtnl_unlock();
+err_mutex_unlock:
+    mutex_unlock(&priv->nfp_fl_lock);
 }

 static void
@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
     struct nfp_fl_ct_flow_entry *ct_entry;
     struct netlink_ext_ack *extack = NULL;

-    ASSERT_RTNL();
-
     extack = flow->common.extack;
     switch (flow->command) {
     case FLOW_CLS_REPLACE:
@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb

     switch (type) {
     case TC_SETUP_CLSFLOWER:
-        rtnl_lock();
+        while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
+            if (!zt->nft) /* avoid deadlock */
+                return err;
+            msleep(20);
+        }
         err = nfp_fl_ct_offload_nft_flow(zt, flow);
-        rtnl_unlock();
+        mutex_unlock(&zt->priv->nfp_fl_lock);
         break;
     default:
         return -EOPNOTSUPP;
@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
     struct nfp_fl_ct_flow_entry *ct_entry;
     struct nfp_fl_ct_zone_entry *zt;
     struct rhashtable *m_table;
+    struct nf_flowtable *nft;

     if (!ct_map_ent)
         return -ENOENT;
@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
         if (ct_map_ent->cookie > 0)
             kfree(ct_map_ent);

-        if (!zt->pre_ct_count) {
-            zt->nft = NULL;
+        if (!zt->pre_ct_count && zt->nft) {
+            nft = zt->nft;
+            zt->nft = NULL; /* avoid deadlock */
+            nf_flow_table_offload_del_cb(nft,
+                                         nfp_fl_ct_handle_nft_flow,
+                                         zt);
             nfp_fl_ct_clean_nft_entries(zt);
         }
         break;
@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
 * @predt_list:     List to keep track of decap pretun flows
 * @neigh_table:    Table to keep track of neighbor entries
 * @predt_lock:     Lock to serialise predt/neigh table updates
+ * @nfp_fl_lock:    Lock to protect the flow offload operation
 */
struct nfp_flower_priv {
     struct nfp_app *app;
@@ -339,6 +340,7 @@ struct nfp_flower_priv {
     struct list_head predt_list;
     struct rhashtable neigh_table;
     spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
+    struct mutex nfp_fl_lock; /* Protect the flow operation */
 };

 /**
@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
     if (err)
         goto err_free_stats_ctx_table;

+    mutex_init(&priv->nfp_fl_lock);
+
     err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
     if (err)
         goto err_free_merge_table;
@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
     u64 parent_ctx = 0;
     int err;

-    ASSERT_RTNL();
-
     if (sub_flow1 == sub_flow2 ||
         nfp_flower_is_merge_flow(sub_flow1) ||
         nfp_flower_is_merge_flow(sub_flow2))
@@ -1727,19 +1725,30 @@ static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                         struct flow_cls_offload *flower)
 {
+    struct nfp_flower_priv *priv = app->priv;
+    int ret;
+
     if (!eth_proto_is_802_3(flower->common.protocol))
         return -EOPNOTSUPP;

+    mutex_lock(&priv->nfp_fl_lock);
     switch (flower->command) {
     case FLOW_CLS_REPLACE:
-        return nfp_flower_add_offload(app, netdev, flower);
+        ret = nfp_flower_add_offload(app, netdev, flower);
+        break;
     case FLOW_CLS_DESTROY:
-        return nfp_flower_del_offload(app, netdev, flower);
+        ret = nfp_flower_del_offload(app, netdev, flower);
+        break;
     case FLOW_CLS_STATS:
-        return nfp_flower_get_stats(app, netdev, flower);
+        ret = nfp_flower_get_stats(app, netdev, flower);
+        break;
     default:
-        return -EOPNOTSUPP;
+        ret = -EOPNOTSUPP;
+        break;
     }
+    mutex_unlock(&priv->nfp_fl_lock);
+
+    return ret;
 }

 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
     repr_priv = repr->app_priv;
     repr_priv->block_shared = f->block_shared;
     f->driver_block_list = &nfp_block_cb_list;
+    f->unlocked_driver_cb = true;

     switch (f->command) {
     case FLOW_BLOCK_BIND:
@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
         nfp_flower_internal_port_can_offload(app, netdev)))
         return -EOPNOTSUPP;

+    f->unlocked_driver_cb = true;
+
     switch (f->command) {
     case FLOW_BLOCK_BIND:
         cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 {
     struct netlink_ext_ack *extack = flow->common.extack;
     struct nfp_flower_priv *fl_priv = app->priv;
+    int ret;

     if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
         NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
         return -EOPNOTSUPP;
     }

+    mutex_lock(&fl_priv->nfp_fl_lock);
     switch (flow->command) {
     case TC_CLSMATCHALL_REPLACE:
-        return nfp_flower_install_rate_limiter(app, netdev, flow,
-                                               extack);
+        ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
+        break;
     case TC_CLSMATCHALL_DESTROY:
-        return nfp_flower_remove_rate_limiter(app, netdev, flow,
-                                              extack);
+        ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
+        break;
     case TC_CLSMATCHALL_STATS:
-        return nfp_flower_stats_rate_limiter(app, netdev, flow,
-                                             extack);
+        ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
+        break;
     default:
-        return -EOPNOTSUPP;
+        ret = -EOPNOTSUPP;
+        break;
     }
+    mutex_unlock(&fl_priv->nfp_fl_lock);
+
+    return ret;
 }

 /* Offload tc action, currently only for tc police */
@@ -2167,6 +2167,8 @@ static int ravb_close(struct net_device *ndev)
             of_phy_deregister_fixed_link(np);
     }

+    cancel_work_sync(&priv->work);
+
     if (info->multi_irqs) {
         free_irq(priv->tx_irqs[RAVB_NC], ndev);
         free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2891,8 +2893,6 @@ static int ravb_remove(struct platform_device *pdev)
     clk_disable_unprepare(priv->gptp_clk);
     clk_disable_unprepare(priv->refclk);

-    dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
-                      priv->desc_bat_dma);
     /* Set reset mode */
     ravb_write(ndev, CCC_OPC_RESET, CCC);
     unregister_netdev(ndev);
@@ -2900,6 +2900,8 @@ static int ravb_remove(struct platform_device *pdev)
     netif_napi_del(&priv->napi[RAVB_NC]);
     netif_napi_del(&priv->napi[RAVB_BE]);
     ravb_mdio_release(priv);
+    dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+                      priv->desc_bat_dma);
     pm_runtime_put_sync(&pdev->dev);
     pm_runtime_disable(&pdev->dev);
     reset_control_assert(priv->rstc);
@@ -1254,7 +1254,7 @@ static void rswitch_adjust_link(struct net_device *ndev)
     phy_print_status(phydev);
     if (phydev->link)
         phy_power_on(rdev->serdes);
-    else
+    else if (rdev->serdes->power_count)
         phy_power_off(rdev->serdes);

     rdev->etha->link = phydev->link;
@@ -1964,15 +1964,17 @@ static void rswitch_deinit(struct rswitch_private *priv)
     rswitch_gwca_hw_deinit(priv);
     rcar_gen4_ptp_unregister(priv->ptp_priv);

-    for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+    rswitch_for_each_enabled_port(priv, i) {
         struct rswitch_device *rdev = priv->rdev[i];

-        phy_exit(priv->rdev[i]->serdes);
-        rswitch_ether_port_deinit_one(rdev);
         unregister_netdev(rdev->ndev);
-        rswitch_device_free(priv, i);
+        rswitch_ether_port_deinit_one(rdev);
+        phy_exit(priv->rdev[i]->serdes);
     }

+    for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+        rswitch_device_free(priv, i);
+
     rswitch_gwca_ts_queue_free(priv);
     rswitch_gwca_linkfix_free(priv);
@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
     struct device_node *np = spi->dev.of_node;
     struct ca8210_priv *priv = spi_get_drvdata(spi);
     struct ca8210_platform_data *pdata = spi->dev.platform_data;
-    int ret = 0;

     if (!np)
         return -EFAULT;
@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
         dev_crit(&spi->dev, "Failed to register external clk\n");
         return PTR_ERR(priv->clk);
     }
-    ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
-    if (ret) {
-        clk_unregister(priv->clk);
-        dev_crit(
-            &spi->dev,
-            "Failed to register external clock as clock provider\n"
-        );
-    } else {
-        dev_info(&spi->dev, "External clock set as clock provider\n");
-    }

-    return ret;
+    return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
 }

 /**
@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
 {
     struct ca8210_priv *priv = spi_get_drvdata(spi);

-    if (!priv->clk)
-        return
+    if (IS_ERR_OR_NULL(priv->clk))
+        return;

     of_clk_del_provider(spi->dev.of_node);
     clk_unregister(priv->clk);
@@ -2383,6 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)

     ctx.sa.assoc_num = assoc_num;
     ctx.sa.tx_sa = tx_sa;
+    ctx.sa.update_pn = !!prev_pn.full64;
     ctx.secy = secy;

     ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
@@ -2476,6 +2477,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)

     ctx.sa.assoc_num = assoc_num;
     ctx.sa.rx_sa = rx_sa;
+    ctx.sa.update_pn = !!prev_pn.full64;
     ctx.secy = secy;

     ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
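The new `update_pn` flag lets offload drivers distinguish "userspace supplied a new packet number" from "the PN merely advanced in software since the SA was created". A sketch of how a driver consumes it; the helper name and body are hypothetical, only the `ctx->sa.update_pn` field comes from the patch above:

    #include <net/macsec.h>

    static int example_write_tx_pn_to_hw(struct macsec_context *ctx)
    {
        /* hypothetical: program the new PN into the offload engine */
        return 0;
    }

    static int example_mdo_upd_txsa(struct macsec_context *ctx)
    {
        if (!ctx->sa.update_pn)
            return 0; /* key/flags-only update: hardware PN stays as-is */

        return example_write_tx_pn_to_hw(ctx);
    }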
@@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
     struct macsec_flow *flow;
     int ret;

+    if (ctx->sa.update_pn)
+        return -EINVAL;
+
     flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
     if (IS_ERR(flow))
         return PTR_ERR(flow);
@@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
     struct macsec_flow *flow;
     int ret;

+    if (ctx->sa.update_pn)
+        return -EINVAL;
+
     flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
     if (IS_ERR(flow))
         return PTR_ERR(flow);
@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
     struct usbnet *dev = netdev_priv(netdev);

     __le16 res;
+    int err;

     if (phy_id) {
         netdev_dbg(dev->net, "Only internal phy supported\n");
         return 0;
     }

-    dm_read_shared_word(dev, 1, loc, &res);
+    err = dm_read_shared_word(dev, 1, loc, &res);
+    if (err < 0) {
+        netdev_err(dev->net, "MDIO read error: %d\n", err);
+        return err;
+    }

     netdev_dbg(dev->net,
                "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
@@ -41,8 +41,6 @@
 #include <asm/xen/hypercall.h>
 #include <xen/balloon.h>

-#define XENVIF_QUEUE_LENGTH 32
-
 /* Number of bytes allowed on the internal guest Rx queue. */
 #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
     dev->features = dev->hw_features | NETIF_F_RXCSUM;
     dev->ethtool_ops = &xenvif_ethtool_ops;

-    dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
-
     dev->min_mtu = ETH_MIN_MTU;
     dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
@@ -127,6 +127,10 @@ struct lynx_28g_lane {
 struct lynx_28g_priv {
     void __iomem *base;
     struct device *dev;
+    /* Serialize concurrent access to registers shared between lanes,
+     * like PCCn
+     */
+    spinlock_t pcc_lock;
     struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
     struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];

@@ -397,6 +401,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
     if (powered_up)
         lynx_28g_power_off(phy);

+    spin_lock(&priv->pcc_lock);
+
     switch (submode) {
     case PHY_INTERFACE_MODE_SGMII:
     case PHY_INTERFACE_MODE_1000BASEX:
@@ -413,6 +419,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
     lane->interface = submode;

 out:
+    spin_unlock(&priv->pcc_lock);
+
     /* Power up the lane if necessary */
     if (powered_up)
         lynx_28g_power_on(phy);
@@ -508,11 +516,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
     for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
         lane = &priv->lane[i];

-        if (!lane->init)
-            continue;
+        mutex_lock(&lane->phy->mutex);

-        if (!lane->powered_up)
+        if (!lane->init || !lane->powered_up) {
+            mutex_unlock(&lane->phy->mutex);
             continue;
+        }

         rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
         if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
@@ -521,6 +530,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
                 rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
             } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
         }
+
+        mutex_unlock(&lane->phy->mutex);
     }
     queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
                        msecs_to_jiffies(1000));
@@ -593,6 +604,7 @@ static int lynx_28g_probe(struct platform_device *pdev)

     dev_set_drvdata(dev, priv);

+    spin_lock_init(&priv->pcc_lock);
     INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);

     queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
@@ -604,6 +616,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
     return PTR_ERR_OR_ZERO(provider);
 }

+static void lynx_28g_remove(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct lynx_28g_priv *priv = dev_get_drvdata(dev);
+
+    cancel_delayed_work_sync(&priv->cdr_check);
+}
+
 static const struct of_device_id lynx_28g_of_match_table[] = {
     { .compatible = "fsl,lynx-28g" },
     { },
@@ -612,6 +632,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);

 static struct platform_driver lynx_28g_driver = {
     .probe = lynx_28g_probe,
+    .remove_new = lynx_28g_remove,
     .driver = {
         .name = "lynx-28g",
         .of_match_table = lynx_28g_of_match_table,
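The lynx-28g CDR check re-queues itself every second, which is exactly why the new remove path must use `cancel_delayed_work_sync()`: a plain cancel can race with an in-flight run that re-arms the work after the module memory is gone. The self-rearming shape, reduced to its essentials (our sketch, not the driver's code):

    #include <linux/workqueue.h>

    static void example_poll(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... poll the hardware ... */

        /* self re-arm: only cancel_delayed_work_sync() reliably stops
         * this, because it waits for a running instance to finish */
        queue_delayed_work(system_power_efficient_wq, dwork,
                           msecs_to_jiffies(1000));
    }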
@@ -102,7 +102,7 @@ config CCWGROUP

 config ISM
     tristate "Support for ISM vPCI Adapter"
-    depends on PCI && SMC
+    depends on PCI
     default n
     help
       Select this option if you want to use the Internal Shared Memory
@@ -1309,7 +1309,7 @@ struct sk_buff_fclones {
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
                                   const struct sk_buff *skb)
@@ -2016,7 +2016,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 *    Copy shared buffers into a new sk_buff. We effectively do COW on
 *    packets to handle cases where we have a local reader and forward
 *    and a couple of other messy ones. The normal one is tcpdumping
- *    a packet thats being forwarded.
+ *    a packet that's being forwarded.
 */

/**
@@ -258,6 +258,7 @@ struct macsec_context {
     struct macsec_secy *secy;
     struct macsec_rx_sc *rx_sc;
     struct {
+        bool update_pn;
         unsigned char assoc_num;
         u8 key[MACSEC_MAX_KEY_LEN];
         union {
|
||||
unsigned short sll_hatype;
|
||||
unsigned char sll_pkttype;
|
||||
unsigned char sll_halen;
|
||||
union {
|
||||
unsigned char sll_addr[8];
|
||||
/* Actual length is in sll_halen. */
|
||||
__DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
|
||||
};
|
||||
unsigned char sll_addr[8];
|
||||
};
|
||||
|
||||
/* Packet types */
|
||||
|
@@ -401,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
     struct bpf_mprog_cp *cp;
     struct bpf_prog *prog;
     const u32 flags = 0;
+    u32 id, count = 0;
+    u64 revision = 1;
     int i, ret = 0;
-    u32 id, count;
-    u64 revision;

     if (attr->query.query_flags || attr->query.attach_flags)
         return -EINVAL;
-    revision = bpf_mprog_revision(entry);
-    count = bpf_mprog_total(entry);
+    if (entry) {
+        revision = bpf_mprog_revision(entry);
+        count = bpf_mprog_total(entry);
+    }
     if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
         return -EFAULT;
     if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
@@ -3796,7 +3796,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
     enum bpf_prog_type ptype;
     struct bpf_prog *prog;
-    u32 mask;
     int ret;

     if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3805,10 +3804,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
     ptype = attach_type_to_prog_type(attr->attach_type);
     if (ptype == BPF_PROG_TYPE_UNSPEC)
         return -EINVAL;
-    mask = bpf_mprog_supported(ptype) ?
-           BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
-    if (attr->attach_flags & ~mask)
-        return -EINVAL;
+    if (bpf_mprog_supported(ptype)) {
+        if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+            return -EINVAL;
+    } else {
+        if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+            return -EINVAL;
+        if (attr->relative_fd ||
+            attr->expected_revision)
+            return -EINVAL;
+    }

     prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
     if (IS_ERR(prog))
@@ -3878,6 +3883,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
             if (IS_ERR(prog))
                 return PTR_ERR(prog);
         }
+    } else if (attr->attach_flags ||
+               attr->relative_fd ||
+               attr->expected_revision) {
+        return -EINVAL;
     }

     switch (ptype) {
@@ -3913,7 +3922,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
     return ret;
 }

-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision

 static int bpf_prog_query(const union bpf_attr *attr,
                           union bpf_attr __user *uattr)
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
 {
     bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
     struct net *net = current->nsproxy->net_ns;
-    struct bpf_mprog_entry *entry;
     struct net_device *dev;
     int ret;

@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
         ret = -ENODEV;
         goto out;
     }
-    entry = tcx_entry_fetch(dev, ingress);
-    if (!entry) {
-        ret = -ENOENT;
-        goto out;
-    }
-    ret = bpf_mprog_query(attr, uattr, entry);
+    ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
 out:
     rtnl_unlock();
     return ret;
@@ -14479,7 +14479,7 @@ static int check_return_code(struct bpf_verifier_env *env)
     struct tnum enforce_attach_type_range = tnum_unknown;
     const struct bpf_prog *prog = env->prog;
     struct bpf_reg_state *reg;
-    struct tnum range = tnum_range(0, 1);
+    struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
     enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
     int err;
     struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14527,8 +14527,8 @@ static int check_return_code(struct bpf_verifier_env *env)
             return -EINVAL;
         }

-        if (!tnum_in(tnum_const(0), reg->var_off)) {
-            verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+        if (!tnum_in(const_0, reg->var_off)) {
+            verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
             return -EINVAL;
         }
         return 0;
@@ -948,21 +948,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
     if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
         return -EADDRNOTAVAIL;

-wait_free_buffer:
-    /* we do not support multiple buffers - for now */
-    if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
-        return -EAGAIN;
+    while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+        /* we do not support multiple buffers - for now */
+        if (msg->msg_flags & MSG_DONTWAIT)
+            return -EAGAIN;

-    /* wait for complete transmission of current pdu */
-    err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
-    if (err)
-        goto err_event_drop;
-
-    if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
         if (so->tx.state == ISOTP_SHUTDOWN)
             return -EADDRNOTAVAIL;

-        goto wait_free_buffer;
+        /* wait for complete transmission of current pdu */
+        err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+        if (err)
+            goto err_event_drop;
     }

     /* PDU size > default => try max_pdu_size */
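The rewritten loop above is an instance of a claim-then-wait pattern: `cmpxchg()` atomically claims the single TX slot, only losers sleep, and shutdown is re-checked on every iteration. Reduced to its skeleton, with our own simplified types and state values:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    enum { EX_IDLE, EX_SENDING, EX_SHUTDOWN };

    struct example_sock {
        int tx_state;
        wait_queue_head_t wait;
    };

    /* Sketch only: claim a one-slot resource guarded by an atomic state word. */
    static int example_claim_tx(struct example_sock *so, bool nonblock)
    {
        int err;

        while (cmpxchg(&so->tx_state, EX_IDLE, EX_SENDING) != EX_IDLE) {
            if (nonblock)
                return -EAGAIN;
            if (so->tx_state == EX_SHUTDOWN)
                return -EADDRNOTAVAIL;
            /* sleep until the current owner drops back to idle */
            err = wait_event_interruptible(so->wait, so->tx_state == EX_IDLE);
            if (err)
                return err;
        }
        return 0; /* we own the slot; release with tx_state = EX_IDLE + wake_up() */
    }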
@@ -3292,15 +3292,19 @@ int skb_checksum_help(struct sk_buff *skb)

     offset = skb_checksum_start_offset(skb);
     ret = -EINVAL;
-    if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+    if (unlikely(offset >= skb_headlen(skb))) {
         DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+        WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
+                  offset, skb_headlen(skb));
         goto out;
     }
     csum = skb_checksum(skb, offset, skb->len - offset, 0);

     offset += skb->csum_offset;
-    if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
+    if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
         DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+        WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
+                  offset + sizeof(__sum16), skb_headlen(skb));
         goto out;
     }
     ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
@@ -58,7 +58,6 @@ struct devlink_health_reporter {
     struct devlink *devlink;
     struct devlink_port *devlink_port;
     struct devlink_fmsg *dump_fmsg;
-    struct mutex dump_lock; /* lock parallel read/write from dump buffers */
     u64 graceful_period;
     bool auto_recover;
     bool auto_dump;
@@ -125,7 +124,6 @@ __devlink_health_reporter_create(struct devlink *devlink,
     reporter->graceful_period = graceful_period;
     reporter->auto_recover = !!ops->recover;
     reporter->auto_dump = !!ops->dump;
-    mutex_init(&reporter->dump_lock);
     return reporter;
 }

@@ -226,7 +224,6 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
 static void
 devlink_health_reporter_free(struct devlink_health_reporter *reporter)
 {
-    mutex_destroy(&reporter->dump_lock);
     if (reporter->dump_fmsg)
         devlink_fmsg_free(reporter->dump_fmsg);
     kfree(reporter);
@@ -625,10 +622,10 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
     }

     if (reporter->auto_dump) {
-        mutex_lock(&reporter->dump_lock);
+        devl_lock(devlink);
         /* store current dump of current error, for later analysis */
         devlink_health_do_dump(reporter, priv_ctx, NULL);
-        mutex_unlock(&reporter->dump_lock);
+        devl_unlock(devlink);
     }

     if (!reporter->auto_recover)
@@ -1262,7 +1259,7 @@ int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
 }

 static struct devlink_health_reporter *
-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
 {
     const struct genl_info *info = genl_info_dump(cb);
     struct devlink_health_reporter *reporter;
@@ -1272,10 +1269,12 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
     devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
     if (IS_ERR(devlink))
         return NULL;
-    devl_unlock(devlink);

     reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
-    devlink_put(devlink);
+    if (!reporter) {
+        devl_unlock(devlink);
+        devlink_put(devlink);
+    }
     return reporter;
 }

@@ -1284,16 +1283,20 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
 {
     struct devlink_nl_dump_state *state = devlink_dump_state(cb);
     struct devlink_health_reporter *reporter;
+    struct devlink *devlink;
     int err;

-    reporter = devlink_health_reporter_get_from_cb(cb);
+    reporter = devlink_health_reporter_get_from_cb_lock(cb);
     if (!reporter)
         return -EINVAL;

-    if (!reporter->ops->dump)
+    devlink = reporter->devlink;
+    if (!reporter->ops->dump) {
+        devl_unlock(devlink);
+        devlink_put(devlink);
         return -EOPNOTSUPP;
+    }

-    mutex_lock(&reporter->dump_lock);
     if (!state->idx) {
         err = devlink_health_do_dump(reporter, NULL, cb->extack);
         if (err)
@@ -1309,7 +1312,8 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
     err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
                               DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
 unlock:
-    mutex_unlock(&reporter->dump_lock);
+    devl_unlock(devlink);
+    devlink_put(devlink);
     return err;
 }

@@ -1326,9 +1330,7 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
     if (!reporter->ops->dump)
         return -EOPNOTSUPP;

-    mutex_lock(&reporter->dump_lock);
     devlink_health_dump_clear(reporter);
-    mutex_unlock(&reporter->dump_lock);
     return 0;
 }
@@ -431,8 +431,10 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
                               ethnl_string_array_t names,
                               struct netlink_ext_ack *extack, bool *mod)
 {
+    u32 *orig_bitmap, *saved_bitmap = NULL;
     struct nlattr *bit_attr;
     bool no_mask;
+    bool dummy;
     int rem;
     int ret;

@@ -448,8 +450,22 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
     }

     no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
-    if (no_mask)
-        ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
+    if (no_mask) {
+        unsigned int nwords = DIV_ROUND_UP(nbits, 32);
+        unsigned int nbytes = nwords * sizeof(u32);
+
+        /* The bitmap size is only the size of the map part without
+         * its mask part.
+         */
+        saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
+        if (!saved_bitmap)
+            return -ENOMEM;
+        memcpy(saved_bitmap, bitmap, nbytes);
+        ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
+        orig_bitmap = saved_bitmap;
+    } else {
+        orig_bitmap = bitmap;
+    }

     nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
         bool old_val, new_val;
@@ -458,13 +474,14 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
         if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
             NL_SET_ERR_MSG_ATTR(extack, bit_attr,
                                 "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
-            return -EINVAL;
+            ret = -EINVAL;
+            goto out;
         }
         ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
                               names, extack);
         if (ret < 0)
-            return ret;
-        old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
+            goto out;
+        old_val = orig_bitmap[idx / 32] & ((u32)1 << (idx % 32));
         if (new_val != old_val) {
             if (new_val)
                 bitmap[idx / 32] |= ((u32)1 << (idx % 32));
@@ -474,7 +491,10 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
         }
     }

-    return 0;
+    ret = 0;
+out:
+    kfree(saved_bitmap);
+    return ret;
 }

 static int ethnl_compact_sanity_checks(unsigned int nbits,
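The size bookkeeping above matters because a no-mask bitset carries only the map words, not the mask, so the saved copy must be sized from `nbits` alone: the number of u32 words backing `nbits` bits is `DIV_ROUND_UP(nbits, 32)`. A tiny model of that arithmetic:

    /* Model of the sizing used above: words and bytes backing an
     * nbits-wide bitmap stored as u32 words.
     */
    static unsigned int bitset_words(unsigned int nbits)
    {
        return (nbits + 31) / 32;            /* DIV_ROUND_UP(nbits, 32) */
    }

    static unsigned int bitset_bytes(unsigned int nbits)
    {
        return bitset_words(nbits) * sizeof(unsigned int);
    }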
@@ -2456,6 +2456,7 @@ static int tcp_mtu_probe(struct sock *sk)

     /* build the payload, and be prepared to abort if this fails. */
     if (tcp_clone_payload(sk, nskb, probe_size)) {
+        tcp_skb_tsorted_anchor_cleanup(nskb);
         consume_skb(nskb);
         return -1;
     }
@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
 {
     struct mctp_route *tmp, *rt = NULL;

+    rcu_read_lock();
+
     list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
         /* TODO: add metrics */
         if (mctp_rt_match_eid(tmp, dnet, daddr)) {
@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
         }
     }

+    rcu_read_unlock();
+
     return rt;
 }

 static struct mctp_route *mctp_route_lookup_null(struct net *net,
                                                  struct net_device *dev)
 {
-    struct mctp_route *rt;
+    struct mctp_route *tmp, *rt = NULL;

-    list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
-        if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
-            refcount_inc_not_zero(&rt->refs))
-            return rt;
+    rcu_read_lock();
+
+    list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+        if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
+            refcount_inc_not_zero(&tmp->refs)) {
+            rt = tmp;
+            break;
+        }
     }

-    return NULL;
+    rcu_read_unlock();
+
+    return rt;
 }

 static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
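Both mctp lookups now follow the canonical RCU pattern: the list may only be walked inside a read-side critical section, and an entry may only escape it after `refcount_inc_not_zero()` has pinned it. The skeleton, with our own placeholder struct in place of the mctp types:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/refcount.h>

    struct example_route {
        struct list_head list;
        refcount_t refs;
        unsigned int dnet;
    };

    static struct example_route *example_lookup(struct list_head *routes,
                                                unsigned int dnet)
    {
        struct example_route *tmp, *rt = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp, routes, list) {
            /* pin the entry before leaving the read-side section */
            if (tmp->dnet == dnet && refcount_inc_not_zero(&tmp->refs)) {
                rt = tmp;
                break;
            }
        }
        rcu_read_unlock();

        return rt; /* caller holds a reference, or NULL */
    }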
@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,

         if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
             llcp_sock = tmp_sock;
+            sock_hold(&llcp_sock->sk);
             break;
         }
     }

     read_unlock(&local->sockets.lock);

-    if (llcp_sock == NULL)
-        return NULL;
-
-    sock_hold(&llcp_sock->sk);
-
     return llcp_sock;
 }

@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)

 static
 struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
-                                            const u8 *sn, size_t sn_len)
+                                            const u8 *sn, size_t sn_len,
+                                            bool needref)
 {
     struct sock *sk;
     struct nfc_llcp_sock *llcp_sock, *tmp_sock;
@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,

         if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
             llcp_sock = tmp_sock;
+            if (needref)
+                sock_hold(&llcp_sock->sk);
             break;
         }
     }
@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
      * to this service name.
      */
     if (nfc_llcp_sock_from_sn(local, sock->service_name,
-                              sock->service_name_len) != NULL) {
+                              sock->service_name_len,
+                              false) != NULL) {
         mutex_unlock(&local->sdp_lock);

         return LLCP_SAP_MAX;
@@ -824,16 +824,7 @@ static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local
 static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
                                                   const u8 *sn, size_t sn_len)
 {
-    struct nfc_llcp_sock *llcp_sock;
-
-    llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
-
-    if (llcp_sock == NULL)
-        return NULL;
-
-    sock_hold(&llcp_sock->sk);
-
-    return llcp_sock;
+    return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
 }

 static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
         }

         llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
-                                          service_name_len);
+                                          service_name_len,
+                                          true);
         if (!llcp_sock) {
             sap = 0;
             goto add_snl;
@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,

         if (sap == LLCP_SAP_MAX) {
             sap = 0;
+            nfc_llcp_sock_put(llcp_sock);
             goto add_snl;
         }

@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,

         pr_debug("%p %d\n", llcp_sock, sap);

+        nfc_llcp_sock_put(llcp_sock);
 add_snl:
         sdp = nfc_llcp_build_sdres_tlv(tid, sap);
         if (sdp == NULL)
@@ -909,6 +909,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
 		return -EINVAL;
 	}

+	if (protocol >= NFC_PROTO_MAX) {
+		pr_err("the requested nfc protocol is invalid\n");
+		return -EINVAL;
+	}
+
 	if (!(nci_target->supported_protocols & (1 << protocol))) {
 		pr_err("target does not support the requested protocol 0x%x\n",
 		       protocol);
@@ -3607,7 +3607,12 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
 	if (dev) {
 		sll->sll_hatype = dev->type;
 		sll->sll_halen = dev->addr_len;
-		memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
+
+		/* Let __fortify_memcpy_chk() know the actual buffer size. */
+		memcpy(((struct sockaddr_storage *)sll)->__data +
+		       offsetof(struct sockaddr_ll, sll_addr) -
+		       offsetofend(struct sockaddr_ll, sll_family),
+		       dev->dev_addr, dev->addr_len);
 	} else {
 		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
 		sll->sll_halen = 0;
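A hedged reading of the pointer arithmetic above (an editorial note, not part of the patch): __data in struct sockaddr_storage begins immediately after the address-family field, i.e. at offsetofend(struct sockaddr_ll, sll_family) when the two structs are overlaid. So

	__data + offsetof(struct sockaddr_ll, sll_addr)
	       - offsetofend(struct sockaddr_ll, sll_family)

is byte-for-byte the same destination as sll->sll_addr, but the compiler now bounds the write by the whole sockaddr_storage object rather than by the declared sll_addr[8]. That is what silences the fortified-memcpy warning for hardware addresses longer than 8 bytes, without adding a flex array to the UAPI struct.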
@@ -366,7 +366,7 @@ static int u32_init(struct tcf_proto *tp)
 	idr_init(&root_ht->handle_idr);

 	if (tp_c == NULL) {
-		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
+		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
 		if (tp_c == NULL) {
 			kfree(root_ht);
 			return -ENOBUFS;
@@ -2,6 +2,7 @@
 config SMC
 	tristate "SMC socket protocol family"
 	depends on INET && INFINIBAND
+	depends on m || ISM != m
 	help
 	  SMC-R provides a "sockets over RDMA" solution making use of
 	  RDMA over Converged Ethernet (RoCE) technology to upgrade
@@ -92,13 +92,14 @@ do { \
 	typeof(_smc_stats) stats = (_smc_stats); \
 	typeof(_tech) t = (_tech); \
 	typeof(_len) l = (_len); \
-	int _pos = fls64((l) >> 13); \
+	int _pos; \
 	typeof(_rc) r = (_rc); \
 	int m = SMC_BUF_MAX - 1; \
 	this_cpu_inc((*stats).smc[t].key ## _cnt); \
-	if (r <= 0) \
+	if (r <= 0 || l <= 0) \
 		break; \
-	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+	_pos = fls64((l - 1) >> 13); \
+	_pos = (_pos <= m) ? _pos : m; \
 	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
 	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
 } \
@@ -138,9 +139,12 @@ while (0)
 do { \
 	typeof(_len) _l = (_len); \
 	typeof(_tech) t = (_tech); \
-	int _pos = fls((_l) >> 13); \
+	int _pos; \
 	int m = SMC_BUF_MAX - 1; \
-	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+	if (_l <= 0) \
+		break; \
+	_pos = fls((_l - 1) >> 13); \
+	_pos = (_pos <= m) ? _pos : m; \
 	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
 } \
 while (0)
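A quick check of the new bucket arithmetic (an illustration, not from the patch): buffers are binned in power-of-two buckets starting at 8 KiB, so for l = 8192 the new expression gives fls64((8192 - 1) >> 13) = fls64(0) = 0 and selects bucket 0 directly, with no exact-power-of-two correction needed; for l = 8193 it gives fls64(8192 >> 13) = fls64(1) = 1, the next bucket. The added l <= 0 test also keeps (l - 1) from underflowing for empty or invalid lengths before the shift.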
@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
 	q->ring_mask = nentries - 1;

 	size = xskq_get_ring_size(q, umem_queue);
+
+	/* size which is overflowing or close to SIZE_MAX will become 0 in
+	 * PAGE_ALIGN(), checking SIZE_MAX is enough due to the previous
+	 * is_power_of_2(), the rest will be handled by vmalloc_user()
+	 */
+	if (unlikely(size == SIZE_MAX)) {
+		kfree(q);
+		return NULL;
+	}
+
 	size = PAGE_ALIGN(size);

 	q->ring = vmalloc_user(size);
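Why SIZE_MAX specifically (an editorial note, not from the patch): PAGE_ALIGN(x) is essentially (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1), so with 4 KiB pages PAGE_ALIGN(SIZE_MAX) wraps to (PAGE_SIZE - 2) & ~(PAGE_SIZE - 1) = 0, and vmalloc_user(0) then trips the zero-size allocation warning. As the in-line comment says, the earlier is_power_of_2() check on nentries leaves SIZE_MAX as the only overflowed value that can reach this point; any other oversized request simply fails inside vmalloc_user().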
@@ -45,7 +45,7 @@ static inline __u32 ifindex_from_link_fd(int fd)
 	return link_info.tcx.ifindex;
 }

-static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
 {
 	__u32 count = 0, attach_flags = 0;
 	int err;
@@ -53,20 +53,22 @@ static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
 	err = bpf_prog_query(ifindex, target, 0, &attach_flags,
 			     NULL, &count);
 	ASSERT_EQ(count, expected, "count");
-	if (!expected && !miniq)
-		ASSERT_EQ(err, -ENOENT, "prog_query");
-	else
-		ASSERT_EQ(err, 0, "prog_query");
+	ASSERT_EQ(err, 0, "prog_query");
 }

 static inline void assert_mprog_count(int target, int expected)
 {
-	__assert_mprog_count(target, expected, false, loopback);
+	__assert_mprog_count(target, expected, loopback);
 }

 static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
 {
-	__assert_mprog_count(target, expected, false, ifindex);
+	__assert_mprog_count(target, expected, ifindex);
 }

+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+	memset(skel->bss, 0, sizeof(*skel->bss));
+}
+
 #endif /* TC_HELPERS */
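The new helper zeroes the skeleton's entire .bss, so every seen_tc* flag is cleared in a single call. The test hunks below use it as a pre-ping reset, replacing the per-flag assignments:

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");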
@@ -65,6 +65,7 @@ void serial_test_tc_links_basic(void)
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -97,6 +98,7 @@ void serial_test_tc_links_basic(void)
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -187,6 +189,7 @@ static void test_tc_links_before_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -194,9 +197,6 @@ static void test_tc_links_before_target(int target)
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-
 	LIBBPF_OPTS_RESET(optl,
 		.flags = BPF_F_BEFORE,
 		.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -246,6 +246,7 @@ static void test_tc_links_before_target(int target)
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -342,6 +343,7 @@ static void test_tc_links_after_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -349,9 +351,6 @@ static void test_tc_links_after_target(int target)
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-
 	LIBBPF_OPTS_RESET(optl,
 		.flags = BPF_F_AFTER,
 		.relative_fd = bpf_program__fd(skel->progs.tc1),
@@ -401,6 +400,7 @@ static void test_tc_links_after_target(int target)
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -502,6 +502,7 @@ static void test_tc_links_revision_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -581,22 +582,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)

 	assert_mprog_count(target, 2);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	err = bpf_link__detach(skel->links.tc2);
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup;

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -707,16 +706,13 @@ static void test_tc_links_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	LIBBPF_OPTS_RESET(optl,
 		.flags = BPF_F_REPLACE,
 		.relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -781,16 +777,13 @@ static void test_tc_links_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	err = bpf_link__detach(skel->links.tc2);
 	if (!ASSERT_OK(err, "link_detach"))
 		goto cleanup;
@@ -812,16 +805,13 @@ static void test_tc_links_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
 	if (!ASSERT_OK(err, "link_update_self"))
 		goto cleanup;
@@ -843,6 +833,7 @@ static void test_tc_links_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 	ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1254,6 +1245,7 @@ static void test_tc_links_prepend_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1261,9 +1253,6 @@ static void test_tc_links_prepend_target(int target)
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-
 	LIBBPF_OPTS_RESET(optl,
 		.flags = BPF_F_BEFORE,
 	);
@@ -1311,6 +1300,7 @@ static void test_tc_links_prepend_target(int target)
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1411,6 +1401,7 @@ static void test_tc_links_append_target(int target)
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 	ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1418,9 +1409,6 @@ static void test_tc_links_append_target(int target)
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-
 	LIBBPF_OPTS_RESET(optl,
 		.flags = BPF_F_AFTER,
 	);
@@ -1468,6 +1456,7 @@ static void test_tc_links_append_target(int target)
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 	ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1637,38 +1626,33 @@ static void test_tc_chain_mixed(int target)

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
 	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");

-	skel->bss->seen_tc4 = false;
-	skel->bss->seen_tc5 = false;
-	skel->bss->seen_tc6 = false;
-
 	err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
 	if (!ASSERT_OK(err, "link_update"))
 		goto cleanup;

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
 	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
 	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");

-	skel->bss->seen_tc4 = false;
-	skel->bss->seen_tc5 = false;
-	skel->bss->seen_tc6 = false;
-
 	err = bpf_link__detach(skel->links.tc6);
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup;

-	__assert_mprog_count(target, 0, true, loopback);
+	assert_mprog_count(target, 0);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -1758,22 +1742,20 @@ static void test_tc_links_ingress(int target, bool chain_tc_old,

 	assert_mprog_count(target, 2);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	err = bpf_link__detach(skel->links.tc2);
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup;

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -59,6 +59,7 @@ void serial_test_tc_opts_basic(void)
 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -83,6 +84,7 @@ void serial_test_tc_opts_basic(void)
 	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -163,6 +165,7 @@ static void test_tc_opts_before_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -219,6 +222,7 @@ static void test_tc_opts_before_target(int target)
 	ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -313,6 +317,7 @@ static void test_tc_opts_after_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -369,6 +374,7 @@ static void test_tc_opts_after_target(int target)
 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -514,6 +520,7 @@ static void test_tc_opts_revision_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -608,22 +615,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)

 	assert_mprog_count(target, 2);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup_detach;

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -635,7 +640,7 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
 	if (!ASSERT_OK(err, "prog_detach"))
 		goto cleanup;

-	__assert_mprog_count(target, 0, chain_tc_old, loopback);
+	assert_mprog_count(target, 0);
 cleanup:
 	if (tc_attached) {
 		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
@@ -730,16 +735,13 @@ static void test_tc_opts_replace_target(int target)
 	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
 	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	LIBBPF_OPTS_RESET(opta,
 		.flags = BPF_F_REPLACE,
 		.replace_prog_fd = fd2,
@@ -767,16 +769,13 @@ static void test_tc_opts_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
 	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
 	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");

-	skel->bss->seen_tc1 = false;
-	skel->bss->seen_tc2 = false;
-	skel->bss->seen_tc3 = false;
-
 	LIBBPF_OPTS_RESET(opta,
 		.flags = BPF_F_REPLACE | BPF_F_BEFORE,
 		.replace_prog_fd = fd3,
@@ -805,6 +804,7 @@ static void test_tc_opts_replace_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1084,6 +1084,7 @@ static void test_tc_opts_prepend_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1124,6 +1125,7 @@ static void test_tc_opts_prepend_target(int target)
 	ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1222,6 +1224,7 @@ static void test_tc_opts_append_target(int target)
 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1262,6 +1265,7 @@ static void test_tc_opts_append_target(int target)
 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -2250,7 +2254,7 @@ static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
 					BPF_TC_INGRESS : BPF_TC_EGRESS;
 		err = bpf_tc_hook_create(&tc_hook);
 		ASSERT_OK(err, "bpf_tc_hook_create");
-		__assert_mprog_count(target, 0, true, loopback);
+		assert_mprog_count(target, 0);
 	}
 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
 	ASSERT_EQ(err, -ENOENT, "prog_detach");
@@ -2316,16 +2320,13 @@ static void test_tc_chain_mixed(int target)

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
 	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");

-	skel->bss->seen_tc4 = false;
-	skel->bss->seen_tc5 = false;
-	skel->bss->seen_tc6 = false;
-
 	LIBBPF_OPTS_RESET(opta,
 		.flags = BPF_F_REPLACE,
 		.replace_prog_fd = fd3,
@@ -2339,21 +2340,19 @@ static void test_tc_chain_mixed(int target)

 	assert_mprog_count(target, 1);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
 	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
 	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");

-	skel->bss->seen_tc4 = false;
-	skel->bss->seen_tc5 = false;
-	skel->bss->seen_tc6 = false;
-
 cleanup_opts:
 	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
 	ASSERT_OK(err, "prog_detach");
-	__assert_mprog_count(target, 0, true, loopback);
+	assert_mprog_count(target, 0);

+	tc_skel_reset_all_seen(skel);
 	ASSERT_OK(system(ping_cmd), ping_cmd);

 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -2462,3 +2461,229 @@ void serial_test_tc_opts_max(void)
 	test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
 	test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
 }
+
+static void test_tc_opts_query_target(int target)
+{
+	const size_t attr_size = offsetofend(union bpf_attr, query);
+	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+	LIBBPF_OPTS(bpf_prog_query_opts, optq);
+	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+	struct test_tc_link *skel;
+	union bpf_attr attr;
+	__u32 prog_ids[5];
+	int err;
+
+	skel = test_tc_link__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.tc1);
+	fd2 = bpf_program__fd(skel->progs.tc2);
+	fd3 = bpf_program__fd(skel->progs.tc3);
+	fd4 = bpf_program__fd(skel->progs.tc4);
+
+	id1 = id_from_prog_fd(fd1);
+	id2 = id_from_prog_fd(fd2);
+	id3 = id_from_prog_fd(fd3);
+	id4 = id_from_prog_fd(fd4);
+
+	assert_mprog_count(target, 0);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 1,
+	);
+
+	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup;
+
+	assert_mprog_count(target, 1);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 2,
+	);
+
+	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup1;
+
+	assert_mprog_count(target, 2);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 3,
+	);
+
+	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup2;
+
+	assert_mprog_count(target, 3);
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = 4,
+	);
+
+	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup3;
+
+	assert_mprog_count(target, 4);
+
+	/* Test 1: Double query via libbpf API */
+	err = bpf_prog_query_opts(loopback, target, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(optq.count, 4, "count");
+	ASSERT_EQ(optq.revision, 5, "revision");
+	ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
+	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+	memset(prog_ids, 0, sizeof(prog_ids));
+	optq.prog_ids = prog_ids;
+
+	err = bpf_prog_query_opts(loopback, target, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(optq.count, 4, "count");
+	ASSERT_EQ(optq.revision, 5, "revision");
+	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+	/* Test 2: Double query via bpf_attr & bpf(2) directly */
+	memset(&attr, 0, attr_size);
+	attr.query.target_ifindex = loopback;
+	attr.query.attach_type = target;
+
+	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(attr.query.count, 4, "count");
+	ASSERT_EQ(attr.query.revision, 5, "revision");
+	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+	ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+	memset(prog_ids, 0, sizeof(prog_ids));
+	attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup4;
+
+	ASSERT_EQ(attr.query.count, 4, "count");
+	ASSERT_EQ(attr.query.revision, 5, "revision");
+	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+	ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+	ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+	ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+	ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+cleanup4:
+	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(target, 3);
+
+cleanup3:
+	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(target, 2);
+
+cleanup2:
+	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(target, 1);
+
+cleanup1:
+	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(target, 0);
+
+cleanup:
+	test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query(void)
+{
+	test_tc_opts_query_target(BPF_TCX_INGRESS);
+	test_tc_opts_query_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_query_attach_target(int target)
+{
+	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+	LIBBPF_OPTS(bpf_prog_query_opts, optq);
+	struct test_tc_link *skel;
+	__u32 prog_ids[2];
+	__u32 fd1, id1;
+	int err;
+
+	skel = test_tc_link__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;
+
+	fd1 = bpf_program__fd(skel->progs.tc1);
+	id1 = id_from_prog_fd(fd1);
+
+	err = bpf_prog_query_opts(loopback, target, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup;
+
+	ASSERT_EQ(optq.count, 0, "count");
+	ASSERT_EQ(optq.revision, 1, "revision");
+
+	LIBBPF_OPTS_RESET(opta,
+		.expected_revision = optq.revision,
+	);
+
+	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+	if (!ASSERT_EQ(err, 0, "prog_attach"))
+		goto cleanup;
+
+	memset(prog_ids, 0, sizeof(prog_ids));
+	optq.prog_ids = prog_ids;
+	optq.count = ARRAY_SIZE(prog_ids);
+
+	err = bpf_prog_query_opts(loopback, target, &optq);
+	if (!ASSERT_OK(err, "prog_query"))
+		goto cleanup1;
+
+	ASSERT_EQ(optq.count, 1, "count");
+	ASSERT_EQ(optq.revision, 2, "revision");
+	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup1:
+	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+	ASSERT_OK(err, "prog_detach");
+	assert_mprog_count(target, 0);
+cleanup:
+	test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query_attach(void)
+{
+	test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
+	test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
+}
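The new query test exercises the common two-pass pattern visible in both halves above: a first BPF_PROG_QUERY with prog_ids left NULL only reports count and revision, and a second call with a buffer of at least that size fills in the program IDs. The test performs the sequence both through bpf_prog_query_opts() and through a raw bpf(2) syscall with a hand-built union bpf_attr, checking that untouched query fields stay zero in each case.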
@@ -2,6 +2,7 @@
 /* Copyright (c) 2021 Facebook */
 #include <test_progs.h>
 #include "timer.skel.h"
+#include "timer_failure.skel.h"

 static int timer(struct timer *timer_skel)
 {
@@ -49,10 +50,11 @@ void serial_test_timer(void)

 	timer_skel = timer__open_and_load();
 	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
-		goto cleanup;
+		return;

 	err = timer(timer_skel);
 	ASSERT_OK(err, "timer");
-cleanup:
 	timer__destroy(timer_skel);
+
+	RUN_TESTS(timer_failure);
 }
tools/testing/selftests/bpf/progs/timer_failure.c (new file, 47 lines)
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+	struct bpf_timer t;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct elem);
+} timer_map SEC(".maps");
+
+static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+{
+	if (bpf_get_smp_processor_id() % 2)
+		return 1;
+	else
+		return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("should have been in (0x0; 0x0)")
+int BPF_PROG2(test_ret_1, int, a)
+{
+	int key = 0;
+	struct bpf_timer *timer;
+
+	timer = bpf_map_lookup_elem(&timer_map, &key);
+	if (timer) {
+		bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
+		bpf_timer_set_callback(timer, timer_cb_ret1);
+		bpf_timer_start(timer, 1000, 0);
+	}
+
+	return 0;
+}
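For context (a hedged note, not from the patch itself): programs annotated with __failure and __msg from bpf_misc.h are expected to be rejected at load time, and the RUN_TESTS(timer_failure) call added to timer.c loads each of them and checks the verifier log for the quoted message. Here the timer callback can return 1, while the verifier requires timer callbacks to return 0, hence the expected "should have been in (0x0; 0x0)" range complaint.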