mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-06 05:13:18 +00:00
Alexei Starovoitov says:

====================
The following pull-request contains BPF updates for your *net-next* tree.

We've added 73 non-merge commits during the last 9 day(s) which contain
a total of 79 files changed, 5275 insertions(+), 600 deletions(-).

The main changes are:

1) Basic BTF validation in libbpf, from Andrii Nakryiko.

2) bpf_assert(), bpf_throw(), exceptions in bpf progs, from Kumar Kartikeya Dwivedi.

3) next_thread cleanups, from Oleg Nesterov.

4) Add mcpu=v4 support to arm32, from Puranjay Mohan.

5) Add support for __percpu pointers in bpf progs, from Yonghong Song.

6) Fix bpf tailcall interaction with bpf trampoline, from Leon Hwang.

7) Raise irq_work in bpf_mem_alloc while irqs are disabled to improve refill probability, from Hou Tao.

Please consider pulling these changes from:

  git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git

Thanks a lot!

Also thanks to reporters, reviewers and testers of commits in this pull-request:

Alan Maguire, Andrey Konovalov, Dave Marchevsky, "Eric W. Biederman",
Jiri Olsa, Maciej Fijalkowski, Quentin Monnet, Russell King (Oracle),
Song Liu, Stanislav Fomichev, Yonghong Song
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 685c6d5b2c
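For a feel of the exceptions support in item 2 above, here is a minimal BPF-side sketch. It is illustrative only, not code from this pull request; it assumes the bpf_throw() kfunc declaration that the selftests ship in bpf_experimental.h:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* kfunc introduced by this series; the declaration normally comes from
 * tools/testing/selftests/bpf/bpf_experimental.h */
extern void bpf_throw(u64 cookie) __ksym;

SEC("tc")
int example_throw(struct __sk_buff *skb)
{
	/* Unwind all BPF frames and terminate the program when an invariant
	 * is violated; the cookie is handed to the exception callback. */
	if (skb->len == 0)
		bpf_throw(0);
	return 0;
}

char _license[] SEC("license") = "GPL";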
Documentation/bpf/prog_flow_dissector.rst:

@@ -113,7 +113,7 @@ Flags
  used by ``eth_get_headlen`` to estimate length of all headers for GRO.
* ``BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL`` - tells BPF flow dissector to
  stop parsing as soon as it reaches IPv6 flow label; used by
  ``___skb_get_hash`` and ``__skb_get_hash_symmetric`` to get flow hash.
  ``___skb_get_hash`` to get flow hash.
* ``BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP`` - tells BPF flow dissector to stop
  parsing as soon as it reaches encapsulated headers; used by routing
  infrastructure.
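To illustrate how a BPF flow dissector program sees these flags — a sketch under the usual flow-dissector conventions, not part of this diff:

SEC("flow_dissector")
int dissect(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	/* ... fill *keys while walking the headers ... */

	/* Callers such as ___skb_get_hash() set this flag to request that
	 * dissection stops once the IPv6 flow label is known. */
	if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
		return BPF_OK;

	/* ... otherwise continue into the L4 headers ... */
	return BPF_OK;
}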
Documentation/netlink/specs/netdev.yaml:

@@ -42,6 +42,19 @@ definitions:
        doc:
          This feature informs if netdev implements non-linear XDP buffer
          support in ndo_xdp_xmit callback.
  -
    type: flags
    name: xdp-rx-metadata
    render-max: true
    entries:
      -
        name: timestamp
        doc:
          Device is capable of exposing receive HW timestamp via bpf_xdp_metadata_rx_timestamp().
      -
        name: hash
        doc:
          Device is capable of exposing receive packet hash via bpf_xdp_metadata_rx_hash().

attribute-sets:
  -
@@ -68,6 +81,13 @@ attribute-sets:
        type: u32
        checks:
          min: 1
      -
        name: xdp-rx-metadata-features
        doc: Bitmask of supported XDP receive metadata features.
          See Documentation/networking/xdp-rx-metadata.rst for more details.
        type: u64
        enum: xdp-rx-metadata
        enum-as-flags: true

operations:
  list:
@@ -84,6 +104,7 @@ operations:
            - ifindex
            - xdp-features
            - xdp-zc-max-segs
            - xdp-rx-metadata-features
      dump:
        reply: *dev-all
    -
Documentation/networking/xdp-rx-metadata.rst:

@@ -105,6 +105,13 @@ bpf_tail_call
Adding programs that access metadata kfuncs to the ``BPF_MAP_TYPE_PROG_ARRAY``
is currently not supported.

Supported Devices
=================

It is possible to query which kfunc the particular netdev implements via
netlink. See ``xdp-rx-metadata-features`` attribute set in
``Documentation/netlink/specs/netdev.yaml``.

Example
=======
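One plausible way to read the new attribute from userspace is the in-tree YNL CLI; shown here as an illustration of the query described above, not as part of this diff:

$ ./tools/net/ynl/cli.py \
      --spec Documentation/netlink/specs/netdev.yaml --dump dev-get

The reply for each device would then carry an xdp-rx-metadata-features bitmask alongside xdp-features.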
MAINTAINERS:

@@ -3596,9 +3596,10 @@ F: Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
F:	drivers/iio/accel/bma400*

BPF JIT for ARM
M:	Shubham Bansal <illusionist.neo@gmail.com>
M:	Russell King <linux@armlinux.org.uk>
M:	Puranjay Mohan <puranjay12@gmail.com>
L:	bpf@vger.kernel.org
S:	Odd Fixes
S:	Maintained
F:	arch/arm/net/

BPF JIT for ARM64
arch/arm/net/bpf_jit_32.c:

@@ -2,6 +2,7 @@
/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2023 Puranjay Mohan <puranjay12@gmail.com>
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */
@@ -15,6 +16,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/math64.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
@@ -228,6 +230,44 @@ static u32 jit_mod32(u32 dividend, u32 divisor)
	return dividend % divisor;
}

static s32 jit_sdiv32(s32 dividend, s32 divisor)
{
	return dividend / divisor;
}

static s32 jit_smod32(s32 dividend, s32 divisor)
{
	return dividend % divisor;
}

/* Wrappers for 64-bit div/mod */
static u64 jit_udiv64(u64 dividend, u64 divisor)
{
	return div64_u64(dividend, divisor);
}

static u64 jit_mod64(u64 dividend, u64 divisor)
{
	u64 rem;

	div64_u64_rem(dividend, divisor, &rem);
	return rem;
}

static s64 jit_sdiv64(s64 dividend, s64 divisor)
{
	return div64_s64(dividend, divisor);
}

static s64 jit_smod64(s64 dividend, s64 divisor)
{
	u64 q;

	q = div64_s64(dividend, divisor);

	return dividend - q * divisor;
}
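jit_smod64() relies on the identity remainder = dividend - quotient * divisor with a truncated quotient. A standalone C sanity check of that identity — illustrative, not kernel code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t dividend = -7, divisor = 3;
	int64_t q = dividend / divisor;       /* truncates toward zero, like div64_s64() */
	int64_t rem = dividend - q * divisor; /* what jit_smod64() computes */

	assert(rem == dividend % divisor);    /* C's % truncates the same way: rem == -1 */
	return 0;
}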
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
@@ -333,6 +373,9 @@ static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)

#define ARM_LDRSH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRSH_I, rt, rn, off)
#define ARM_LDRSB_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRSB_I, rt, rn, off)

#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
@@ -474,17 +517,18 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
	return to - from - 2;
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op, u8 sign)
{
	const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
	const s8 *tmp = bpf2a32[TMP_REG_1];
	u32 dst;

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
		if (op == BPF_DIV) {
			emit(sign ? ARM_SDIV(rd, rm, rn) : ARM_UDIV(rd, rm, rn), ctx);
		} else {
			emit(sign ? ARM_SDIV(ARM_IP, rm, rn) : ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
@@ -512,8 +556,19 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
	emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	if (sign) {
		if (op == BPF_DIV)
			dst = (u32)jit_sdiv32;
		else
			dst = (u32)jit_smod32;
	} else {
		if (op == BPF_DIV)
			dst = (u32)jit_udiv32;
		else
			dst = (u32)jit_mod32;
	}

	emit_mov_i(ARM_IP, dst, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Restore caller-saved registers from stack */
@@ -530,6 +585,78 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
	emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}

static inline void emit_udivmod64(const s8 *rd, const s8 *rm, const s8 *rn, struct jit_ctx *ctx,
				  u8 op, u8 sign)
{
	u32 dst;

	/* Push caller-saved registers on stack */
	emit(ARM_PUSH(CALLER_MASK), ctx);

	/*
	 * As we are implementing 64-bit div/mod as function calls, We need to put the dividend in
	 * R0-R1 and the divisor in R2-R3. As we have already pushed these registers on the stack,
	 * we can recover them later after returning from the function call.
	 */
	if (rm[1] != ARM_R0 || rn[1] != ARM_R2) {
		/*
		 * Move Rm to {R1, R0} if it is not already there.
		 */
		if (rm[1] != ARM_R0) {
			if (rn[1] == ARM_R0)
				emit(ARM_PUSH(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
			emit(ARM_MOV_R(ARM_R1, rm[0]), ctx);
			emit(ARM_MOV_R(ARM_R0, rm[1]), ctx);
			if (rn[1] == ARM_R0) {
				emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
				goto cont;
			}
		}
		/*
		 * Move Rn to {R3, R2} if it is not already there.
		 */
		if (rn[1] != ARM_R2) {
			emit(ARM_MOV_R(ARM_R3, rn[0]), ctx);
			emit(ARM_MOV_R(ARM_R2, rn[1]), ctx);
		}
	}

cont:

	/* Call appropriate function */
	if (sign) {
		if (op == BPF_DIV)
			dst = (u32)jit_sdiv64;
		else
			dst = (u32)jit_smod64;
	} else {
		if (op == BPF_DIV)
			dst = (u32)jit_udiv64;
		else
			dst = (u32)jit_mod64;
	}

	emit_mov_i(ARM_IP, dst, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd[1] != ARM_R0) {
		emit(ARM_MOV_R(rd[0], ARM_R1), ctx);
		emit(ARM_MOV_R(rd[1], ARM_R0), ctx);
	}

	/* Recover {R3, R2} and {R1, R0} from stack if they are not Rd */
	if (rd[1] != ARM_R0 && rd[1] != ARM_R2) {
		emit(ARM_POP(CALLER_MASK), ctx);
	} else if (rd[1] != ARM_R0) {
		emit(ARM_POP(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
		emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
	} else {
		emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
		emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
	}
}

/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
@@ -744,12 +871,16 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
}

/* dst = src (4 bytes)*/
static inline void emit_a32_mov_r(const s8 dst, const s8 src,
static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
				  struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	if (off && off != 32) {
		emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
		emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
	}
	arm_bpf_put_reg32(dst, rt, ctx);
}
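The ARM_LSL_I/ARM_ASR_I pair emitted above is the classic shift-based sign extension of an off-bit value; in C terms it behaves roughly like this sketch:

static int32_t sign_extend32(uint32_t x, unsigned int off)
{
	/* Move the sign bit of the off-bit value into bit 31, then shift
	 * back arithmetically so the upper bits replicate it. */
	return (int32_t)(x << (32 - off)) >> (32 - off);
}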
@@ -758,15 +889,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				    const s8 src[],
				    struct jit_ctx *ctx) {
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
		emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
		emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const u8 *tmp = bpf2a32[TMP_REG_1];

@@ -782,6 +913,24 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
	}
}

/* dst = (signed)src */
static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
				      struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rt;

	rt = arm_bpf_get_reg64(dst, tmp, ctx);

	emit_a32_mov_r(dst_lo, src_lo, off, ctx);
	if (!is64) {
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else {
		emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
	}
}

/* Shift operations */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx, const u8 op) {
@@ -1026,6 +1175,24 @@ static bool is_ldst_imm(s16 off, const u8 size)
	return -off_max <= off && off <= off_max;
}

static bool is_ldst_imm8(s16 off, const u8 size)
{
	s16 off_max = 0;

	switch (size) {
	case BPF_B:
		off_max = 0xff;
		break;
	case BPF_W:
		off_max = 0xfff;
		break;
	case BPF_H:
		off_max = 0xff;
		break;
	}
	return -off_max <= off && off <= off_max;
}

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s16 off, struct jit_ctx *ctx, const u8 sz){
@@ -1105,6 +1272,50 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = *(signed size*)(src + off) */
static inline void emit_ldsx_r(const s8 dst[], const s8 src,
			       s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;
	int add_off;

	if (!is_ldst_imm8(off, sz)) {
		/*
		 * offset does not fit in the load/store immediate,
		 * construct an ADD instruction to apply the offset.
		 */
		add_off = imm8m(off);
		if (add_off > 0) {
			emit(ARM_ADD_I(tmp[0], src, add_off), ctx);
			rm = tmp[0];
		} else {
			emit_a32_mov_i(tmp[0], off, ctx);
			emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
			rm = tmp[0];
		}
		off = 0;
	}

	switch (sz) {
	case BPF_B:
		/* Load a Byte with sign extension*/
		emit(ARM_LDRSB_I(rd[1], rm, off), ctx);
		break;
	case BPF_H:
		/* Load a HalfWord with sign extension*/
		emit(ARM_LDRSH_I(rd[1], rm, off), ctx);
		break;
	case BPF_W:
		/* Load a Word*/
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		break;
	}
	/* Carry the sign extension to upper 32 bits */
	emit(ARM_ASR_I(rd[0], rd[1], 31), ctx);
	arm_bpf_put_reg64(dst, rd, ctx);
}
/* Arithmatic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op,
@@ -1385,7 +1596,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
			emit_a32_mov_i(dst_hi, 0, ctx);
			break;
		}
		emit_a32_mov_r64(is64, dst, src, ctx);
		if (insn->off)
			emit_a32_movsx_r64(is64, insn->off, dst, src, ctx);
		else
			emit_a32_mov_r64(is64, dst, src, ctx);
		break;
	case BPF_K:
		/* Sign-extend immediate value to destination reg */
@@ -1461,7 +1675,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
			rt = src_lo;
			break;
		}
		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code), off);
		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(dst_hi, 0, ctx);
@@ -1470,7 +1684,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		goto notyet;
		rd = arm_bpf_get_reg64(dst, tmp2, ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rs = arm_bpf_get_reg64(src, tmp, ctx);
			break;
		case BPF_K:
			rs = tmp;
			emit_a32_mov_se_i64(is64, rs, imm, ctx);
			break;
		}
		emit_udivmod64(rd, rd, rs, ctx, BPF_OP(code), off);
		arm_bpf_put_reg64(dst, rd, ctx);
		break;
	/* dst = dst << imm */
	/* dst = dst >> imm */
	/* dst = dst >> imm (signed) */
@@ -1545,10 +1771,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
	case BPF_ALU | BPF_END | BPF_FROM_BE: /* also BPF_TO_BE */
	/* dst = bswap(dst) */
	case BPF_ALU64 | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
		rd = arm_bpf_get_reg64(dst, tmp, ctx);
		if (BPF_SRC(code) == BPF_FROM_LE)
		if (BPF_SRC(code) == BPF_FROM_LE && BPF_CLASS(code) != BPF_ALU64)
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
@@ -1603,8 +1831,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	/* LDSX: dst = *(signed size *)(src + off) */
	case BPF_LDX | BPF_MEMSX | BPF_B:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_W:
		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
		if (BPF_MODE(insn->code) == BPF_MEMSX)
			emit_ldsx_r(dst, rn, off, ctx, BPF_SIZE(code));
		else
			emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
		break;
	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
@@ -1761,10 +1996,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
		break;
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	case BPF_JMP32 | BPF_JA:
	{
		if (off == 0)
		if (BPF_CLASS(code) == BPF_JMP32 && imm != 0)
			jmp_offset = bpf2a32_offset(i + imm, i, ctx);
		else if (BPF_CLASS(code) == BPF_JMP && off != 0)
			jmp_offset = bpf2a32_offset(i + off, i, ctx);
		else
			break;
		jmp_offset = bpf2a32_offset(i+off, i, ctx);

		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
arch/arm/net/bpf_jit_32.h:

@@ -79,9 +79,11 @@
#define ARM_INST_LDST__IMM12	0x00000fff
#define ARM_INST_LDRB_I		0x05500000
#define ARM_INST_LDRB_R		0x07d00000
#define ARM_INST_LDRSB_I	0x015000d0
#define ARM_INST_LDRD_I		0x014000d0
#define ARM_INST_LDRH_I		0x015000b0
#define ARM_INST_LDRH_R		0x019000b0
#define ARM_INST_LDRSH_I	0x015000f0
#define ARM_INST_LDR_I		0x05100000
#define ARM_INST_LDR_R		0x07900000

@@ -137,6 +139,7 @@
#define ARM_INST_TST_I		0x03100000

#define ARM_INST_UDIV		0x0730f010
#define ARM_INST_SDIV		0x0710f010

#define ARM_INST_UMULL		0x00800090

@@ -265,6 +268,7 @@
#define ARM_TST_I(rn, imm)	_AL3_I(ARM_INST_TST, 0, rn, imm)

#define ARM_UDIV(rd, rn, rm)	(ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8)
#define ARM_SDIV(rd, rn, rm)	(ARM_INST_SDIV | (rd) << 16 | (rn) | (rm) << 8)

#define ARM_UMULL(rd_lo, rd_hi, rn, rm)	(ARM_INST_UMULL | (rd_hi) << 16 \
					 | (rd_lo) << 12 | (rm) << 8 | rn)
arch/arm64/net/bpf_jit_comp.c:

@@ -288,7 +288,7 @@ static bool is_lsi_offset(int offset, int scale)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const bool is_main_prog = prog->aux->func_idx == 0;
	const bool is_main_prog = !bpf_is_subprog(prog);
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
arch/s390/net/bpf_jit_comp.c:

@@ -556,7 +556,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
	jit->prologue_plt_ret = jit->prg;

	if (fp->aux->func_idx == 0) {
	if (!bpf_is_subprog(fp)) {
		/* Initialize the tail call counter in the main program. */
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
arch/x86/net/bpf_jit_comp.c:

@@ -16,6 +16,9 @@
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>

static bool all_callee_regs_used[4] = {true, true, true, true};

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
@@ -255,6 +258,14 @@ struct jit_context {
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(11 + ENDBR_INSN_SIZE)

static void push_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x54);   /* push r12 */
	*pprog = prog;
}

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
@@ -270,6 +281,14 @@ static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
	*pprog = prog;
}

static void pop_r12(u8 **pprog)
{
	u8 *prog = *pprog;

	EMIT2(0x41, 0x5C);   /* pop r12 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
@@ -291,7 +310,8 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
			  bool tail_call_reachable, bool is_subprog,
			  bool is_exception_cb)
{
	u8 *prog = *pprog;

@@ -303,12 +323,30 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			/* When it's the entry of the whole tailcall context,
			 * zeroing rax means initialising tail_call_cnt.
			 */
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			/* Keep the same instruction layout. */
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* Exception callback receives FP as third parameter */
	if (is_exception_cb) {
		EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
		EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
		/* The main frame must have exception_boundary as true, so we
		 * first restore those callee-saved regs from stack, before
		 * reusing the stack frame.
		 */
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
		/* Reset the stack frame. */
		EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
	} else {
		EMIT1(0x55);             /* push rbp */
		EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	}

	/* X86_TAIL_CALL_OFFSET is here */
	EMIT_ENDBR();
@@ -467,7 +505,8 @@ static void emit_return(u8 **pprog, u8 *ip)
 * goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
					u8 **pprog, bool *callee_regs_used,
					u32 stack_depth, u8 *ip,
					struct jit_context *ctx)
{
@@ -517,7 +556,12 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
	offset = ctx->tail_call_indirect_label - (prog + 2 - start);
	EMIT2(X86_JE, offset);                    /* je out */

	pop_callee_regs(&prog, callee_regs_used);
	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
	}

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
@@ -541,7 +585,8 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
				      struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, u8 *ip,
				      bool *callee_regs_used, u32 stack_depth,
				      struct jit_context *ctx)
@@ -570,7 +615,13 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	pop_callee_regs(&prog, callee_regs_used);
	if (bpf_prog->aux->exception_boundary) {
		pop_callee_regs(&prog, all_callee_regs_used);
		pop_r12(&prog);
	} else {
		pop_callee_regs(&prog, callee_regs_used);
	}

	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
@@ -1018,6 +1069,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack)				\
	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
@@ -1041,8 +1096,20 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);
		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
	/* Exception callback will clobber callee regs for its own use, and
	 * restore the original callee regs from main prog's stack frame.
	 */
	if (bpf_prog->aux->exception_boundary) {
		/* We also need to save r12, which is not mapped to any BPF
		 * register, as we throw after entry into the kernel, which may
		 * overwrite r12.
		 */
		push_r12(&prog);
		push_callee_regs(&prog, all_callee_regs_used);
	} else {
		push_callee_regs(&prog, callee_regs_used);
	}

	ilen = prog - temp;
	if (rw_image)
@@ -1623,9 +1690,7 @@ st: if (is_imm8(insn->off))

			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
				if (!imm32)
					return -EINVAL;
				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
@@ -1641,13 +1706,15 @@ st: if (is_imm8(insn->off))

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
				emit_bpf_tail_call_direct(bpf_prog,
							  &bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, image + addrs[i - 1],
							  callee_regs_used,
							  bpf_prog->aux->stack_depth,
							  ctx);
			else
				emit_bpf_tail_call_indirect(&prog,
				emit_bpf_tail_call_indirect(bpf_prog,
							    &prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth,
							    image + addrs[i - 1],
@@ -1900,7 +1967,12 @@ st: if (is_imm8(insn->off))
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			if (bpf_prog->aux->exception_boundary) {
				pop_callee_regs(&prog, all_callee_regs_used);
				pop_r12(&prog);
			} else {
				pop_callee_regs(&prog, callee_regs_used);
			}
			EMIT1(0xC9);         /* leave */
			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
			break;
@@ -2400,6 +2472,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 *                     [ ... ]
 *                     [ stack_arg2 ]
 * RBP - arg_stack_off [ stack_arg1 ]
 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
 */

	/* room for return value of orig_call or fentry prog */
@@ -2464,6 +2537,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
	else
		/* sub rsp, stack_size */
		EMIT4(0x48, 0x83, 0xEC, stack_size);
	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		EMIT1(0x50);		/* push rax */
	/* mov QWORD PTR [rbp - rbx_off], rbx */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);

@@ -2516,9 +2591,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
		restore_regs(m, &prog, regs_off);
		save_args(m, &prog, arg_stack_off, true);

		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
			/* Before calling the original function, restore the
			 * tail_call_cnt from stack to rax.
			 */
			RESTORE_TAIL_CALL_CNT(stack_size);

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd0); /* call *rax */
			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd3); /* call *rbx */
		} else {
			/* call original function */
			if (emit_rsb_call(&prog, orig_call, prog)) {
@@ -2569,7 +2650,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
			ret = -EINVAL;
			goto cleanup;
		}
	}
	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		/* Before running the original function, restore the
		 * tail_call_cnt from stack to rax.
		 */
		RESTORE_TAIL_CALL_CNT(stack_size);

	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
@@ -2913,3 +2999,30 @@ void bpf_jit_free(struct bpf_prog *prog)

	bpf_prog_unlock_free(prog);
}

bool bpf_jit_supports_exceptions(void)
{
	/* We unwind through both kernel frames (starting from within bpf_throw
	 * call) and BPF frames. Therefore we require one of ORC or FP unwinder
	 * to be enabled to walk kernel frames and reach BPF frames in the stack
	 * trace.
	 */
	return IS_ENABLED(CONFIG_UNWINDER_ORC) || IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER);
}

void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
			break;
	}
	return;
#endif
	WARN(1, "verification of programs using bpf_throw should have failed\n");
}
include/linux/bpf.h:

@@ -55,8 +55,8 @@ struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma;
extern bool bpf_global_ma_set;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -180,14 +180,15 @@ enum btf_field_type {
	BPF_TIMER      = (1 << 1),
	BPF_KPTR_UNREF = (1 << 2),
	BPF_KPTR_REF   = (1 << 3),
	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF,
	BPF_LIST_HEAD  = (1 << 4),
	BPF_LIST_NODE  = (1 << 5),
	BPF_RB_ROOT    = (1 << 6),
	BPF_RB_NODE    = (1 << 7),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD  = (1 << 5),
	BPF_LIST_NODE  = (1 << 6),
	BPF_RB_ROOT    = (1 << 7),
	BPF_RB_NODE    = (1 << 8),
	BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
				 BPF_RB_NODE | BPF_RB_ROOT,
	BPF_REFCOUNT   = (1 << 8),
	BPF_REFCOUNT   = (1 << 9),
};

typedef void (*btf_dtor_kfunc_t)(void *);
@@ -300,6 +301,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return "kptr";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
@@ -325,6 +328,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
		return sizeof(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
@@ -351,6 +355,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
		return __alignof__(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
@@ -389,6 +394,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
	case BPF_TIMER:
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		break;
	default:
		WARN_ON_ONCE(1);
@@ -1029,6 +1035,11 @@ struct btf_func_model {
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicate that current trampoline is in a tail call context. Then, it has to
 * cache and restore tail_call_cnt to avoid infinite tail call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
@@ -1378,6 +1389,7 @@ struct bpf_prog_aux {
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
@@ -1398,6 +1410,8 @@ struct bpf_prog_aux {
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool exception_cb;
	bool exception_boundary;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
@@ -1420,6 +1434,7 @@ struct bpf_prog_aux {
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
	unsigned int (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp);
#ifdef CONFIG_SECURITY
	void *security;
#endif
@@ -2407,9 +2422,11 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
			   struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
			  struct bpf_reg_state *reg, bool is_ex_cb);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
				    int comp_idx, const char *tag_key);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
@@ -3183,4 +3200,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags)
	return flags;
}

static inline bool bpf_is_subprog(const struct bpf_prog *prog)
{
	return prog->aux->func_idx != 0;
}

#endif /* _LINUX_BPF_H */
include/linux/bpf_verifier.h:

@@ -300,6 +300,7 @@ struct bpf_func_state {
	bool in_callback_fn;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
@@ -480,6 +481,7 @@ struct bpf_insn_aux_data {
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
@@ -540,7 +542,9 @@ struct bpf_subprog_info {
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_cb;
	bool is_async_cb;
	bool is_exception_cb;
};

struct bpf_verifier_env;
@@ -587,6 +591,8 @@ struct bpf_verifier_env {
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
@@ -594,10 +600,11 @@ struct bpf_verifier_env {
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
include/linux/filter.h:

@@ -117,21 +117,25 @@ struct ctl_table_header;

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)		\
#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF)	\
	((struct bpf_insn) {			\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,			\
		.src_reg = 0,			\
		.off   = 0,			\
		.off   = OFF,			\
		.imm   = IMM })
#define BPF_ALU64_IMM(OP, DST, IMM)		\
	BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)

#define BPF_ALU32_IMM(OP, DST, IMM)		\
#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF)	\
	((struct bpf_insn) {			\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,			\
		.src_reg = 0,			\
		.off   = 0,			\
		.off   = OFF,			\
		.imm   = IMM })
#define BPF_ALU32_IMM(OP, DST, IMM)		\
	BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)

/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

@@ -143,6 +147,16 @@ struct ctl_table_header;
		.off   = 0,			\
		.imm   = LEN })

/* Byte Swap, bswap16/32/64 */

#define BPF_BSWAP(DST, LEN)			\
	((struct bpf_insn) {			\
		.code  = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE),	\
		.dst_reg = DST,			\
		.src_reg = 0,			\
		.off   = 0,			\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)			\
@@ -179,6 +193,24 @@ struct ctl_table_header;
		.off   = 0,			\
		.imm   = IMM })

/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */

#define BPF_MOVSX64_REG(DST, SRC, OFF)		\
	((struct bpf_insn) {			\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,	\
		.dst_reg = DST,			\
		.src_reg = SRC,			\
		.off   = OFF,			\
		.imm   = 0 })

#define BPF_MOVSX32_REG(DST, SRC, OFF)		\
	((struct bpf_insn) {			\
		.code  = BPF_ALU | BPF_MOV | BPF_X,	\
		.dst_reg = DST,			\
		.src_reg = SRC,			\
		.off   = OFF,			\
		.imm   = 0 })

/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST)			\
	((struct bpf_insn) {			\
@@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
		.off   = OFF,			\
		.imm   = 0 })

/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */

#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF)	\
	((struct bpf_insn) {			\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX,	\
		.dst_reg = DST,			\
		.src_reg = SRC,			\
		.off   = OFF,			\
		.imm   = 0 })
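As a usage illustration (register and offset values here are hypothetical), a sign-extending byte load r0 = *(s8 *)(r1 + 0) would be constructed with the new macro as:

struct bpf_insn insn = BPF_LDX_MEMSX(BPF_B, BPF_REG_0, BPF_REG_1, 0);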

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)	\
@@ -912,6 +954,8 @@ bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(const struct cred *cred)
@@ -1127,6 +1171,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
@@ -1194,6 +1239,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
	return -ERANGE;
}

static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	return NULL;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
include/linux/kasan.h:

@@ -285,8 +285,10 @@ static inline bool kasan_check_byte(const void *address)

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC
include/net/xdp.h:

@@ -383,14 +383,25 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

/* Define the relationship between xdp-rx-metadata kfunc and
 * various other entities:
 * - xdp_rx_metadata enum
 * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
 * - kfunc name
 * - xdp_metadata_ops field
 */
#define XDP_METADATA_KFUNC_xxx	\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
			   bpf_xdp_metadata_rx_timestamp) \
			   NETDEV_XDP_RX_METADATA_TIMESTAMP, \
			   bpf_xdp_metadata_rx_timestamp, \
			   xmo_rx_timestamp) \
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
			   bpf_xdp_metadata_rx_hash) \
			   NETDEV_XDP_RX_METADATA_HASH, \
			   bpf_xdp_metadata_rx_hash, \
			   xmo_rx_hash) \

enum {
#define XDP_METADATA_KFUNC(name, _) name,
enum xdp_rx_metadata {
#define XDP_METADATA_KFUNC(name, _, __, ___) name,
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
	MAX_XDP_METADATA_KFUNC,
include/net/xdp_sock.h:

@@ -14,6 +14,8 @@
#include <linux/mm.h>
#include <net/sock.h>

#define XDP_UMEM_SG_FLAG (1 << 1)

struct net_device;
struct xsk_queue;
struct xdp_buff;
include/uapi/linux/bpf.h:

@@ -932,7 +932,14 @@ enum bpf_map_type {
	 */
	BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
	/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
	 * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
	 * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
	 * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
	 * deprecated.
	 */
	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
	BPF_MAP_TYPE_QUEUE,
	BPF_MAP_TYPE_STACK,
	BPF_MAP_TYPE_SK_STORAGE,
include/uapi/linux/netdev.h:

@@ -38,11 +38,27 @@ enum netdev_xdp_act {
	NETDEV_XDP_ACT_MASK = 127,
};

/**
 * enum netdev_xdp_rx_metadata
 * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
 *   timestamp via bpf_xdp_metadata_rx_timestamp().
 * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
 *   hash via bpf_xdp_metadata_rx_hash().
 */
enum netdev_xdp_rx_metadata {
	NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
	NETDEV_XDP_RX_METADATA_HASH = 2,

	/* private: */
	NETDEV_XDP_RX_METADATA_MASK = 3,
};

enum {
	NETDEV_A_DEV_IFINDEX = 1,
	NETDEV_A_DEV_PAD,
	NETDEV_A_DEV_XDP_FEATURES,
	NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
	NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,

	__NETDEV_A_DEV_MAX,
	NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
kernel/bpf/bpf_struct_ops.c:

@@ -615,7 +615,10 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	bpf_jit_free_exec(st_map->image);
	if (st_map->image) {
		bpf_jit_free_exec(st_map->image);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}
@@ -657,6 +660,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;
	int ret;

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
@@ -681,12 +685,27 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
	st_map->st_ops = st_ops;
	map = &st_map->map;

	ret = bpf_jit_charge_modmem(PAGE_SIZE);
	if (ret) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(ret);
	}

	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->image) {
		/* __bpf_struct_ops_map_free() uses st_map->image as flag
		 * for "charged or not". In this case, we need to uncharge
		 * here.
		 */
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}
	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->links || !st_map->image) {
	if (!st_map->uvalue || !st_map->links) {
		__bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}
@@ -907,4 +926,3 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
	kfree(link);
	return err;
}
kernel/bpf/btf.c:

@@ -3293,6 +3293,8 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
		type = BPF_KPTR_UNREF;
	else if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
		type = BPF_KPTR_REF;
	else if (!strcmp("percpu_kptr", __btf_name_by_offset(btf, t->name_off)))
		type = BPF_KPTR_PERCPU;
	else
		return -EINVAL;

@@ -3308,10 +3310,10 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
	return BTF_FIELD_FOUND;
}

static const char *btf_find_decl_tag_value(const struct btf *btf,
					   const struct btf_type *pt,
					   int comp_idx, const char *tag_key)
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
				    int comp_idx, const char *tag_key)
{
	const char *value = NULL;
	int i;

	for (i = 1; i < btf_nr_types(btf); i++) {
@@ -3325,9 +3327,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
			continue;
		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
			continue;
		return __btf_name_by_offset(btf, t->name_off) + len;
		/* Prevent duplicate entries for same type */
		if (value)
			return ERR_PTR(-EEXIST);
		value = __btf_name_by_offset(btf, t->name_off) + len;
	}
	return NULL;
	if (!value)
		return ERR_PTR(-ENOENT);
	return value;
}

static int
@@ -3345,7 +3352,7 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
	if (t->size != sz)
		return BTF_FIELD_IGNORE;
	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
	if (!value_type)
	if (IS_ERR(value_type))
		return -EINVAL;
	node_field_name = strstr(value_type, ":");
	if (!node_field_name)
@@ -3457,6 +3464,7 @@ static int btf_find_struct_field(const struct btf *btf,
			break;
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			ret = btf_find_kptr(btf, member_type, off, sz,
					    idx < info_cnt ? &info[idx] : &tmp);
			if (ret < 0)
@@ -3523,6 +3531,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
			break;
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			ret = btf_find_kptr(btf, var_type, off, sz,
					    idx < info_cnt ? &info[idx] : &tmp);
			if (ret < 0)
@@ -3783,6 +3792,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
			break;
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
			if (ret < 0)
				goto end;
@@ -6949,7 +6959,7 @@ int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
 * (either PTR_TO_CTX or SCALAR_VALUE).
 */
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *regs)
			  struct bpf_reg_state *regs, bool is_ex_cb)
{
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
@@ -7006,7 +7016,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
		return -EINVAL;
	}
	/* check that function returns int */
	/* check that function returns int, exception cb also requires this */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
@@ -7055,6 +7065,14 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			i, btf_type_str(t), tname);
		return -EINVAL;
	}
	/* We have already ensured that the callback returns an integer, just
	 * like all global subprogs. We need to determine it only has a single
	 * scalar argument.
	 */
	if (is_ex_cb && (nargs != 1 || regs[BPF_REG_1].type != SCALAR_VALUE)) {
		bpf_log(log, "exception cb only supports single integer argument\n");
		return -EINVAL;
	}
	return 0;
}
kernel/bpf/core.c:

@@ -64,8 +64,8 @@
#define OFF	insn->off
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;
struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
bool bpf_global_ma_set, bpf_global_percpu_ma_set;

/* No hurry in this branch
 *
@@ -212,7 +212,7 @@ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;

@@ -539,7 +539,7 @@ static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
	for (i = 0; i < fp->aux->real_func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

@@ -589,7 +589,7 @@ bpf_prog_ksym_set_name(struct bpf_prog *prog)
	sym = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
@@ -623,7 +623,11 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)

	if (val < ksym->start)
		return -1;
	if (val >= ksym->end)
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
|
||||
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
|
||||
{
|
||||
struct bpf_ksym *ksym = bpf_ksym_find(addr);
|
||||
|
||||
@ -1208,7 +1212,7 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
|
||||
if (!extra_pass)
|
||||
addr = NULL;
|
||||
else if (prog->aux->func &&
|
||||
off >= 0 && off < prog->aux->func_cnt)
|
||||
off >= 0 && off < prog->aux->real_func_cnt)
|
||||
addr = (u8 *)prog->aux->func[off]->bpf_func;
|
||||
else
|
||||
return -EINVAL;
|
||||
@ -2721,7 +2725,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
|
||||
#endif
|
||||
if (aux->dst_trampoline)
|
||||
bpf_trampoline_put(aux->dst_trampoline);
|
||||
for (i = 0; i < aux->func_cnt; i++) {
|
||||
for (i = 0; i < aux->real_func_cnt; i++) {
|
||||
/* We can just unlink the subprog poke descriptor table as
|
||||
* it was originally linked to the main program and is also
|
||||
* released along with it.
|
||||
@ -2729,7 +2733,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
|
||||
aux->func[i]->aux->poke_tab = NULL;
|
||||
bpf_jit_free(aux->func[i]);
|
||||
}
|
||||
if (aux->func_cnt) {
|
||||
if (aux->real_func_cnt) {
|
||||
kfree(aux->func);
|
||||
bpf_prog_unlock_free(aux->prog);
|
||||
} else {
|
||||
@ -2914,6 +2918,15 @@ int __weak bpf_arch_text_invalidate(void *dst, size_t len)
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
bool __weak bpf_jit_supports_exceptions(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
|
||||
{
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
static int __init bpf_global_ma_init(void)
|
||||
{
|
||||
@ -2921,7 +2934,9 @@ static int __init bpf_global_ma_init(void)
|
||||
|
||||
ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
|
||||
bpf_global_ma_set = !ret;
|
||||
return ret;
|
||||
ret = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
|
||||
bpf_global_percpu_ma_set = !ret;
|
||||
return !bpf_global_ma_set || !bpf_global_percpu_ma_set;
|
||||
}
|
||||
late_initcall(bpf_global_ma_init);
|
||||
#endif
|
||||
|
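The two __weak hooks added above are the architecture opt-in points for BPF exceptions: the default stubs keep exceptions disabled. As a hedged sketch (not part of this diff), a JIT that supports bpf_throw would override both, roughly:

bool bpf_jit_supports_exceptions(void)
{
	return true;
}

void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
	/* Walk the frame-pointer chain, reporting each frame's ip/sp/bp
	 * until consume_fn returns false; bpf_stack_walker() above stops
	 * at the first frame that belongs to a main BPF program.
	 */
}
|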
@ -22,6 +22,7 @@
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

@ -1902,6 +1903,14 @@ __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
return p;
}

__bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
u64 size = local_type_id__k;

/* The verifier has ensured that meta__ign must be NULL */
return bpf_mem_alloc(&bpf_global_percpu_ma, size);
}

/* Must be called under migrate_disable(), as required by bpf_mem_free */
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
{
@ -1930,6 +1939,12 @@ __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
}

__bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
{
/* The verifier has ensured that meta__ign must be NULL */
bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
}

__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
{
struct btf_struct_meta *meta = meta__ign;
@ -2435,6 +2450,49 @@ __bpf_kfunc void bpf_rcu_read_unlock(void)
rcu_read_unlock();
}

struct bpf_throw_ctx {
struct bpf_prog_aux *aux;
u64 sp;
u64 bp;
int cnt;
};

static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{
struct bpf_throw_ctx *ctx = cookie;
struct bpf_prog *prog;

if (!is_bpf_text_address(ip))
return !ctx->cnt;
prog = bpf_prog_ksym_find(ip);
ctx->cnt++;
if (bpf_is_subprog(prog))
return true;
ctx->aux = prog->aux;
ctx->sp = sp;
ctx->bp = bp;
return false;
}

__bpf_kfunc void bpf_throw(u64 cookie)
{
struct bpf_throw_ctx ctx = {};

arch_bpf_stack_walk(bpf_stack_walker, &ctx);
WARN_ON_ONCE(!ctx.aux);
if (ctx.aux)
WARN_ON_ONCE(!ctx.aux->exception_boundary);
WARN_ON_ONCE(!ctx.bp);
WARN_ON_ONCE(!ctx.cnt);
/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
* deeper stack depths than ctx.sp as we do not return from bpf_throw,
* which skips compiler generated instrumentation to do the same.
*/
kasan_unpoison_task_stack_below((void *)ctx.sp);
ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp);
WARN(1, "A call to BPF exception callback should never return\n");
}

__diag_pop();

BTF_SET8_START(generic_btf_ids)
@ -2442,7 +2500,9 @@ BTF_SET8_START(generic_btf_ids)
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
@ -2462,6 +2522,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
BTF_SET8_END(generic_btf_ids)

static const struct btf_kfunc_id_set generic_kfunc_set = {
|
@ -499,15 +499,16 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;

/* room for llist_node and per-cpu pointer */
if (percpu)
percpu_size = LLIST_NODE_SZ + sizeof(void *);

if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
if (!pc)
return -ENOMEM;

if (percpu)
/* room for llist_node and per-cpu pointer */
percpu_size = LLIST_NODE_SZ + sizeof(void *);
else
if (!percpu)
size += LLIST_NODE_SZ; /* room for llist_node */
unit_size = size;

@ -527,10 +528,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
return 0;
}

/* size == 0 && percpu is an invalid combination */
if (WARN_ON_ONCE(percpu))
return -EINVAL;

pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
@ -543,6 +540,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c = &cc->cache[i];
c->unit_size = sizes[i];
c->objcg = objcg;
c->percpu_size = percpu_size;
c->tgt = c;
prefill_mem_cache(c, cpu);
}
@ -734,12 +732,17 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
}
}
local_dec(&c->active);
local_irq_restore(flags);

WARN_ON(cnt < 0);

if (cnt < c->low_watermark)
irq_work_raise(c);
/* Enable IRQ after the enqueue of irq work completes, so irq work
* will run after IRQ is enabled and free_llist may be refilled by
* irq work before other task preempts current task.
*/
local_irq_restore(flags);

return llnode;
}

@ -775,11 +778,16 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
llist_add(llnode, &c->free_llist_extra);
}
local_dec(&c->active);
local_irq_restore(flags);

if (cnt > c->high_watermark)
/* free few objects from current cpu into global kmalloc pool */
irq_work_raise(c);
/* Enable IRQ after irq_work_raise() completes, otherwise when current
* task is preempted by task which does unit_alloc(), unit_alloc() may
* return NULL unexpectedly because irq work is already pending but can
* not been triggered and free_llist can not be refilled timely.
*/
local_irq_restore(flags);
}

static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
@ -797,10 +805,10 @@ static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
llist_add(llnode, &c->free_llist_extra_rcu);
}
local_dec(&c->active);
local_irq_restore(flags);

if (!atomic_read(&c->call_rcu_in_progress))
irq_work_raise(c);
local_irq_restore(flags);
}

/* Called from BPF program or from sys_bpf syscall.
|
@ -232,7 +232,14 @@ int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
attr->prog_type != BPF_PROG_TYPE_XDP)
return -EINVAL;

if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
if (attr->prog_flags & ~(BPF_F_XDP_DEV_BOUND_ONLY | BPF_F_XDP_HAS_FRAGS))
return -EINVAL;

/* Frags are allowed only if program is dev-bound-only, but not
* if it is requesting bpf offload.
*/
if (attr->prog_flags & BPF_F_XDP_HAS_FRAGS &&
!(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY))
return -EINVAL;

if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
@ -845,10 +852,11 @@ void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
if (!ops)
goto out;

if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
p = ops->xmo_rx_timestamp;
else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
p = ops->xmo_rx_hash;
#define XDP_METADATA_KFUNC(name, _, __, xmo) \
if (func_id == bpf_xdp_metadata_kfunc_id(name)) p = ops->xmo;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

out:
up_read(&bpf_devs_lock);

|
@ -514,6 +514,7 @@ void btf_record_free(struct btf_record *rec)
switch (rec->fields[i].type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
if (rec->fields[i].kptr.module)
module_put(rec->fields[i].kptr.module);
btf_put(rec->fields[i].kptr.btf);
@ -560,6 +561,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
switch (fields[i].type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
btf_get(fields[i].kptr.btf);
if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
ret = -ENXIO;
@ -650,6 +652,7 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
WRITE_ONCE(*(u64 *)field_ptr, 0);
break;
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
if (!xchgd_field)
break;
@ -1045,6 +1048,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
break;
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
case BPF_REFCOUNT:
if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
@ -2745,7 +2749,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
* period before we can tear down JIT memory since symbols
* are already exposed under kallsyms.
*/
__bpf_prog_put_noref(prog, prog->aux->func_cnt);
__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
return err;
free_prog_sec:
free_uid(prog->aux->user);
|
@ -35,16 +35,13 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
u32 *tid,
bool skip_if_dup_files)
{
struct task_struct *task, *next_task;
struct task_struct *task;
struct pid *pid;
u32 saved_tid;
u32 next_tid;

if (!*tid) {
/* The first time, the iterator calls this function. */
pid = find_pid_ns(common->pid, common->ns);
if (!pid)
return NULL;

task = get_pid_task(pid, PIDTYPE_TGID);
if (!task)
return NULL;
@ -66,44 +63,27 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
return task;
}

pid = find_pid_ns(common->pid_visiting, common->ns);
if (!pid)
return NULL;

task = get_pid_task(pid, PIDTYPE_PID);
task = find_task_by_pid_ns(common->pid_visiting, common->ns);
if (!task)
return NULL;

retry:
if (!pid_alive(task)) {
put_task_struct(task);
return NULL;
}
task = next_thread(task);

next_task = next_thread(task);
put_task_struct(task);
if (!next_task)
return NULL;

saved_tid = *tid;
*tid = __task_pid_nr_ns(next_task, PIDTYPE_PID, common->ns);
if (!*tid || *tid == common->pid) {
next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
if (!next_tid || next_tid == common->pid) {
/* Run out of tasks of a process. The tasks of a
* thread_group are linked as circular linked list.
*/
*tid = saved_tid;
return NULL;
}

get_task_struct(next_task);
common->pid_visiting = *tid;

if (skip_if_dup_files && task->files == task->group_leader->files) {
task = next_task;
if (skip_if_dup_files && task->files == task->group_leader->files)
goto retry;
}

return next_task;
*tid = common->pid_visiting = next_tid;
get_task_struct(task);
return task;
}

static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
|
@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
goto out;
}

/* clear all bits except SHARE_IPMODIFY */
tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);

if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
|
File diff suppressed because it is too large
|
371 lib/test_bpf.c
@ -5111,6 +5111,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffffffff } }
},
/* MOVSX32 */
{
"ALU_MOVSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000ffffffefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 8),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOVSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000ffffbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 16),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOVSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 32),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* MOVSX64 REG */
{
"ALU64_MOVSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 8),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOVSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 16),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOVSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffdeadbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 32),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@ -6105,6 +6203,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2 } },
},
/* BPF_ALU | BPF_DIV | BPF_X off=1 (SDIV) */
{
"ALU_SDIV_X: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG_OFF(BPF_DIV, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU | BPF_DIV | BPF_K off=1 (SDIV) */
{
"ALU_SDIV_K: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU64 | BPF_DIV | BPF_X off=1 (SDIV64) */
{
"ALU64_SDIV_X: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG_OFF(BPF_DIV, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU64 | BPF_DIV | BPF_K off=1 (SDIV64) */
{
"ALU64_SDIV_K: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU | BPF_MOD | BPF_X off=1 (SMOD) */
{
"ALU_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG_OFF(BPF_MOD, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU | BPF_MOD | BPF_K off=1 (SMOD) */
{
"ALU_SMOD_K: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM_OFF(BPF_MOD, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU64 | BPF_MOD | BPF_X off=1 (SMOD64) */
{
"ALU64_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG_OFF(BPF_MOD, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
{
"ALU64_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
@ -7837,6 +8035,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
},
/* BSWAP */
{
"BSWAP 16: 0x0123456789abcdef -> 0xefcd",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcd } },
},
{
"BSWAP 32: 0x0123456789abcdef -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcdab89 } },
},
{
"BSWAP 64: 0x0123456789abcdef -> 0x67452301",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x67452301 } },
},
{
"BSWAP 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcdab89 } },
},
/* BSWAP, reversed */
{
"BSWAP 16: 0xfedcba9876543210 -> 0x1032",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1032 } },
},
{
"BSWAP 32: 0xfedcba9876543210 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x10325476 } },
},
{
"BSWAP 64: 0xfedcba9876543210 -> 0x98badcfe",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x98badcfe } },
},
{
"BSWAP 64: 0xfedcba9876543210 >> 32 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x10325476 } },
},
/* BPF_LDX_MEM B/H/W/DW */
{
"BPF_LDX_MEM | BPF_B, base",
@ -8228,6 +8524,67 @@ static struct bpf_test tests[] = {
{ { 32, 0 } },
.stack_depth = 0,
},
/* BPF_LDX_MEMSX B/H/W */
{
"BPF_LDX_MEMSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xdead0000000000f0ULL),
BPF_LD_IMM64(R2, 0xfffffffffffffff0ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_B, R0, R10, -1),
#else
BPF_LDX_MEMSX(BPF_B, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEMSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xdead00000000f123ULL),
BPF_LD_IMM64(R2, 0xfffffffffffff123ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_H, R0, R10, -2),
#else
BPF_LDX_MEMSX(BPF_H, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEMSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x00000000deadbeefULL),
BPF_LD_IMM64(R2, 0xffffffffdeadbeefULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_W, R0, R10, -4),
#else
BPF_LDX_MEMSX(BPF_W, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@ -9474,6 +9831,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP32 | BPF_JA */
{
"JMP32_JA: Unconditional jump: if (true) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_JMP32_IMM(BPF_JA, 0, 1, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
|
@ -564,7 +564,6 @@ void kasan_restore_multi_shot(bool enabled);
* code. Declared here to avoid warnings about missing declarations.
*/

asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(void *globals, ssize_t size);
void __asan_unregister_globals(void *globals, ssize_t size);
void __asan_handle_no_return(void);
|
@ -5,6 +5,7 @@
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/xdp.h>

#include "netdev-genl-gen.h"

@ -12,15 +13,24 @@ static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
const struct genl_info *info)
{
u64 xdp_rx_meta = 0;
void *hdr;

hdr = genlmsg_iput(rsp, info);
if (!hdr)
return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
netdev->xdp_features, NETDEV_A_DEV_PAD)) {
netdev->xdp_features, NETDEV_A_DEV_PAD) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
xdp_rx_meta, NETDEV_A_DEV_PAD)) {
genlmsg_cancel(rsp, hdr);
return -EINVAL;
}
|
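With the attribute filled in as above, per-device metadata kfunc support becomes queryable from userspace. Assuming the ynl CLI shipped under tools/net/ynl is available, a query might look roughly like this (illustrative output, not captured from a real device):

$ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump dev-get
[{'ifindex': 2,
  'xdp-features': {'basic', 'redirect'},
  'xdp-rx-metadata-features': {'timestamp', 'hash'},
  'xdp-zc-max-segs': 1}]
|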
@ -741,7 +741,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
__diag_pop();

BTF_SET8_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_SET8_END(xdp_metadata_kfunc_ids)
@ -752,7 +752,7 @@ static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

|
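The hunks above widen XDP_METADATA_KFUNC from two to four parameters; the backing table in include/net/xdp.h is not shown in this diff. Judging from how the callers expand it (enum id, UAPI flag, kfunc symbol, xdp_metadata_ops member), the table presumably looks along these lines:

#define XDP_METADATA_KFUNC_xxx						\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP,		\
			   NETDEV_XDP_RX_METADATA_TIMESTAMP,		\
			   bpf_xdp_metadata_rx_timestamp,		\
			   xmo_rx_timestamp)				\
	XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH,			\
			   NETDEV_XDP_RX_METADATA_HASH,			\
			   bpf_xdp_metadata_rx_hash,			\
			   xmo_rx_hash)

This keeps the kfunc registration, the netlink feature reporting, and the dev-bound kfunc resolution all generated from a single list.
|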
@ -1228,7 +1228,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)

xs->dev = dev;
xs->zc = xs->umem->zc;
xs->sg = !!(flags & XDP_USE_SG);
xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);

|
@ -170,6 +170,9 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
if (err)
return err;

if (flags & XDP_USE_SG)
pool->umem->flags |= XDP_UMEM_SG_FLAG;

if (flags & XDP_USE_NEED_WAKEUP)
pool->uses_need_wakeup = true;
/* Tx needs to be explicitly woken up the first time. Also
|
@ -175,6 +175,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
TPROGS_CFLAGS += -I$(srctree)/tools/lib
TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0

ifdef SYSROOT
@ -314,6 +315,9 @@ XDP_SAMPLE_CFLAGS += -Wall -O2 \

$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
$(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
# Override includes for trace_helpers.o because __must_check won't be defined
# in our include path.
$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=

-include $(BPF_SAMPLES_PATH)/Makefile.target

|
@ -1209,7 +1209,7 @@ static int do_skeleton(int argc, char **argv)
codegen("\
\n\
\n\
s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
s->data = %2$s__elf_bytes(&s->data_sz); \n\
\n\
obj->skeleton = s; \n\
return 0; \n\
|
@ -932,7 +932,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
/* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
* attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
* local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
* deprecated.
*/
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
|
@ -38,11 +38,27 @@ enum netdev_xdp_act {
NETDEV_XDP_ACT_MASK = 127,
};

/**
* enum netdev_xdp_rx_metadata
* @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
* timestamp via bpf_xdp_metadata_rx_timestamp().
* @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
* hash via bpf_xdp_metadata_rx_hash().
*/
enum netdev_xdp_rx_metadata {
NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
NETDEV_XDP_RX_METADATA_HASH = 2,

/* private: */
NETDEV_XDP_RX_METADATA_MASK = 3,
};

enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
NETDEV_A_DEV_XDP_FEATURES,
NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,

__NETDEV_A_DEV_MAX,
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
|
@ -181,6 +181,7 @@ enum libbpf_tristate {
#define __ksym __attribute__((section(".ksyms")))
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))

#define bpf_ksym_exists(sym) ({ \
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
|
@ -448,6 +448,165 @@ static int btf_parse_type_sec(struct btf *btf)
return 0;
}

static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
{
const char *s;

s = btf__str_by_offset(btf, str_off);
if (!s) {
pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
return -EINVAL;
}

return 0;
}

static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
{
const struct btf_type *t;

t = btf__type_by_id(btf, id);
if (!t) {
pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
return -EINVAL;
}

return 0;
}

static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
__u32 kind = btf_kind(t);
int err, i, n;

err = btf_validate_str(btf, t->name_off, "type name", id);
if (err)
return err;

switch (kind) {
case BTF_KIND_UNKN:
case BTF_KIND_INT:
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
break;
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
err = btf_validate_id(btf, t->type, id);
if (err)
return err;
break;
case BTF_KIND_ARRAY: {
const struct btf_array *a = btf_array(t);

err = btf_validate_id(btf, a->type, id);
err = err ?: btf_validate_id(btf, a->index_type, id);
if (err)
return err;
break;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
const struct btf_member *m = btf_members(t);

n = btf_vlen(t);
for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "field name", id);
err = err ?: btf_validate_id(btf, m->type, id);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM: {
const struct btf_enum *m = btf_enum(t);

n = btf_vlen(t);
for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "enum name", id);
if (err)
return err;
}
break;
}
case BTF_KIND_ENUM64: {
const struct btf_enum64 *m = btf_enum64(t);

n = btf_vlen(t);
for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "enum name", id);
if (err)
return err;
}
break;
}
case BTF_KIND_FUNC: {
const struct btf_type *ft;

err = btf_validate_id(btf, t->type, id);
if (err)
return err;
ft = btf__type_by_id(btf, t->type);
if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
return -EINVAL;
}
break;
}
case BTF_KIND_FUNC_PROTO: {
const struct btf_param *m = btf_params(t);

n = btf_vlen(t);
for (i = 0; i < n; i++, m++) {
err = btf_validate_str(btf, m->name_off, "param name", id);
err = err ?: btf_validate_id(btf, m->type, id);
if (err)
return err;
}
break;
}
case BTF_KIND_DATASEC: {
const struct btf_var_secinfo *m = btf_var_secinfos(t);

n = btf_vlen(t);
for (i = 0; i < n; i++, m++) {
err = btf_validate_id(btf, m->type, id);
if (err)
return err;
}
break;
}
default:
pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
return -EINVAL;
}
return 0;
}

/* Validate basic sanity of BTF. It's intentionally less thorough than
* kernel's validation and validates only properties of BTF that libbpf relies
* on to be correct (e.g., valid type IDs, valid string offsets, etc)
*/
static int btf_sanity_check(const struct btf *btf)
{
const struct btf_type *t;
__u32 i, n = btf__type_cnt(btf);
int err;

for (i = 1; i < n; i++) {
t = btf_type_by_id(btf, i);
err = btf_validate_type(btf, t, i);
if (err)
return err;
}
return 0;
}

__u32 btf__type_cnt(const struct btf *btf)
{
return btf->start_id + btf->nr_types;
@ -902,6 +1061,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)

err = btf_parse_str_sec(btf);
err = err ?: btf_parse_type_sec(btf);
err = err ?: btf_sanity_check(btf);
if (err)
goto done;

|
@ -436,9 +436,11 @@ struct bpf_program {
int fd;
bool autoload;
bool autoattach;
bool sym_global;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
int exception_cb_idx;

int prog_ifindex;
__u32 attach_btf_obj_fd;
@ -765,6 +767,7 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,

prog->type = BPF_PROG_TYPE_UNSPEC;
prog->fd = -1;
prog->exception_cb_idx = -1;

/* libbpf's convention for SEC("?abc...") is that it's just like
* SEC("abc...") but the corresponding bpf_program starts out with
@ -871,14 +874,16 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;

if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
prog->sym_global = true;

/* if function is a global/weak symbol, but has restricted
* (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
* as static to enable more permissive BPF verification mode
* with more outside context available to BPF verifier
*/
if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
&& (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
prog->mark_btf_static = true;

nr_progs++;
@ -3142,6 +3147,86 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
}
}

if (!kernel_supports(obj, FEAT_BTF_DECL_TAG))
goto skip_exception_cb;
for (i = 0; i < obj->nr_programs; i++) {
struct bpf_program *prog = &obj->programs[i];
int j, k, n;

if (prog_is_subprog(obj, prog))
continue;
n = btf__type_cnt(obj->btf);
for (j = 1; j < n; j++) {
const char *str = "exception_callback:", *name;
size_t len = strlen(str);
struct btf_type *t;

t = btf_type_by_id(obj->btf, j);
if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
continue;

name = btf__str_by_offset(obj->btf, t->name_off);
if (strncmp(name, str, len))
continue;

t = btf_type_by_id(obj->btf, t->type);
if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
prog->name);
return -EINVAL;
}
if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)))
continue;
/* Multiple callbacks are specified for the same prog,
* the verifier will eventually return an error for this
* case, hence simply skip appending a subprog.
*/
if (prog->exception_cb_idx >= 0) {
prog->exception_cb_idx = -1;
break;
}

name += len;
if (str_is_empty(name)) {
pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
prog->name);
return -EINVAL;
}

for (k = 0; k < obj->nr_programs; k++) {
struct bpf_program *subprog = &obj->programs[k];

if (!prog_is_subprog(obj, subprog))
continue;
if (strcmp(name, subprog->name))
continue;
/* Enforce non-hidden, as from verifier point of
* view it expects global functions, whereas the
* mark_btf_static fixes up linkage as static.
*/
if (!subprog->sym_global || subprog->mark_btf_static) {
pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
prog->name, subprog->name);
return -EINVAL;
}
/* Let's see if we already saw a static exception callback with the same name */
if (prog->exception_cb_idx >= 0) {
pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
prog->name, subprog->name);
return -EINVAL;
}
prog->exception_cb_idx = k;
break;
}

if (prog->exception_cb_idx >= 0)
continue;
pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
return -ENOENT;
}
}
skip_exception_cb:

sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@ -6234,14 +6319,46 @@ static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_progra
return 0;
}

static int
bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *subprog)
{
struct bpf_insn *insns;
size_t new_cnt;
int err;

subprog->sub_insn_off = main_prog->insns_cnt;

new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
if (!insns) {
pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
return -ENOMEM;
}
main_prog->insns = insns;
main_prog->insns_cnt = new_cnt;

memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
subprog->insns_cnt * sizeof(*insns));

pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);

/* The subprog insns are now appended. Append its relos too. */
err = append_subprog_relos(main_prog, subprog);
if (err)
return err;
return 0;
}

static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *prog)
{
size_t sub_insn_idx, insn_idx, new_cnt;
size_t sub_insn_idx, insn_idx;
struct bpf_program *subprog;
struct bpf_insn *insns, *insn;
struct reloc_desc *relo;
struct bpf_insn *insn;
int err;

err = reloc_prog_func_and_line_info(obj, main_prog, prog);
@ -6316,25 +6433,7 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
* and relocate.
*/
if (subprog->sub_insn_off == 0) {
subprog->sub_insn_off = main_prog->insns_cnt;

new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
if (!insns) {
pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
return -ENOMEM;
}
main_prog->insns = insns;
main_prog->insns_cnt = new_cnt;

memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
subprog->insns_cnt * sizeof(*insns));

pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);

/* The subprog insns are now appended. Append its relos too. */
err = append_subprog_relos(main_prog, subprog);
err = bpf_object__append_subprog_code(obj, main_prog, subprog);
if (err)
return err;
err = bpf_object__reloc_code(obj, main_prog, subprog);
@ -6568,6 +6667,25 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
prog->name, err);
return err;
}

/* Now, also append exception callback if it has not been done already. */
if (prog->exception_cb_idx >= 0) {
struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];

/* Calling exception callback directly is disallowed, which the
* verifier will reject later. In case it was processed already,
* we can skip this step, otherwise for all other valid cases we
* have to append exception callback now.
*/
if (subprog->sub_insn_off == 0) {
err = bpf_object__append_subprog_code(obj, prog, subprog);
if (err)
return err;
err = bpf_object__reloc_code(obj, prog, subprog);
if (err)
return err;
}
}
}
/* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
|
@ -45,12 +45,26 @@ const char *netdev_xdp_act_str(enum netdev_xdp_act value)
return netdev_xdp_act_strmap[value];
}

static const char * const netdev_xdp_rx_metadata_strmap[] = {
[0] = "timestamp",
[1] = "hash",
};

const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value)
{
value = ffs(value) - 1;
if (value < 0 || value >= (int)MNL_ARRAY_SIZE(netdev_xdp_rx_metadata_strmap))
return NULL;
return netdev_xdp_rx_metadata_strmap[value];
}

/* Policies */
struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
[NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
[NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
[NETDEV_A_DEV_XDP_FEATURES] = { .name = "xdp-features", .type = YNL_PT_U64, },
[NETDEV_A_DEV_XDP_ZC_MAX_SEGS] = { .name = "xdp-zc-max-segs", .type = YNL_PT_U32, },
[NETDEV_A_DEV_XDP_RX_METADATA_FEATURES] = { .name = "xdp-rx-metadata-features", .type = YNL_PT_U64, },
};

struct ynl_policy_nest netdev_dev_nest = {
@ -97,6 +111,11 @@ int netdev_dev_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
return MNL_CB_ERROR;
dst->_present.xdp_zc_max_segs = 1;
dst->xdp_zc_max_segs = mnl_attr_get_u32(attr);
} else if (type == NETDEV_A_DEV_XDP_RX_METADATA_FEATURES) {
if (ynl_attr_validate(yarg, attr))
return MNL_CB_ERROR;
dst->_present.xdp_rx_metadata_features = 1;
dst->xdp_rx_metadata_features = mnl_attr_get_u64(attr);
}
}

|
@ -18,6 +18,7 @@ extern const struct ynl_family ynl_netdev_family;
/* Enums */
const char *netdev_op_str(int op);
const char *netdev_xdp_act_str(enum netdev_xdp_act value);
const char *netdev_xdp_rx_metadata_str(enum netdev_xdp_rx_metadata value);

/* Common nested types */
/* ============== NETDEV_CMD_DEV_GET ============== */
@ -48,11 +49,13 @@ struct netdev_dev_get_rsp {
__u32 ifindex:1;
__u32 xdp_features:1;
__u32 xdp_zc_max_segs:1;
__u32 xdp_rx_metadata_features:1;
} _present;

__u32 ifindex;
__u64 xdp_features;
__u32 xdp_zc_max_segs;
__u64 xdp_rx_metadata_features;
};

void netdev_dev_get_rsp_free(struct netdev_dev_get_rsp *rsp);
|
@ -4,7 +4,7 @@ include ../Makefile.deps

CC=gcc
CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \
-I../lib/ -I../generated/ -idirafter $(UAPI_PATH)
-I../../../include/uapi -I../lib/ -I../generated/ -idirafter $(UAPI_PATH)
ifeq ("$(DEBUG)","1")
CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
endif
|
@ -32,12 +32,18 @@ static void netdev_print_device(struct netdev_dev_get_rsp *d, unsigned int op)
if (!d->_present.xdp_features)
return;

printf("%llx:", d->xdp_features);
printf("xdp-features (%llx):", d->xdp_features);
for (int i = 0; d->xdp_features > 1U << i; i++) {
if (d->xdp_features & (1U << i))
printf(" %s", netdev_xdp_act_str(1 << i));
}

printf(" xdp-rx-metadata-features (%llx):", d->xdp_rx_metadata_features);
for (int i = 0; d->xdp_rx_metadata_features > 1U << i; i++) {
if (d->xdp_rx_metadata_features & (1U << i))
printf(" %s", netdev_xdp_rx_metadata_str(1 << i));
}

printf(" xdp-zc-max-segs=%u", d->xdp_zc_max_segs);

name = netdev_op_str(op);
|
@ -1,5 +1,6 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
|
@ -6,6 +6,7 @@ bpf_loop # attaches to __x64_sys_nanosleep
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
dynptr/test_dynptr_skb_data
dynptr/test_skb_readonly
exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
|
@ -131,4 +131,323 @@ extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *nod
*/
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

/* Description
* Allocates a percpu object of the type represented by 'local_type_id' in
* program BTF. User may use the bpf_core_type_id_local macro to pass the
* type ID of a struct in program BTF.
*
* The 'local_type_id' parameter must be a known constant.
* The 'meta' parameter is rewritten by the verifier, no need for BPF
* program to set it.
* Returns
* A pointer to a percpu object of the type corresponding to the passed in
* 'local_type_id', or NULL on failure.
*/
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
* Free an allocated percpu object. All fields of the object that require
* destruction will be destructed before the storage is freed.
*
* The 'meta' parameter is rewritten by the verifier, no need for BPF
* program to set it.
* Returns
* Void.
*/
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)

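A minimal usage sketch for the two wrappers above (names are illustrative; a real program would typically publish the pointer into a map value with bpf_kptr_xchg() and read it through bpf_per_cpu_ptr() before dropping it):

struct elem {
	long counter;
};

SEC("tc")
int percpu_obj_demo(struct __sk_buff *ctx)
{
	struct elem __percpu_kptr *e;

	e = bpf_percpu_obj_new(struct elem);
	if (!e)
		return 0;
	/* ... stash e somewhere kptr-capable, or release it again: */
	bpf_percpu_obj_drop(e);
	return 0;
}
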
/* Description
|
||||
* Throw a BPF exception from the program, immediately terminating its
|
||||
* execution and unwinding the stack. The supplied 'cookie' parameter
|
||||
* will be the return value of the program when an exception is thrown,
|
||||
* and the default exception callback is used. Otherwise, if an exception
|
||||
* callback is set using the '__exception_cb(callback)' declaration tag
|
||||
* on the main program, the 'cookie' parameter will be the callback's only
|
||||
* input argument.
|
||||
*
|
||||
* Thus, in case of default exception callback, 'cookie' is subjected to
|
||||
* constraints on the program's return value (as with R0 on exit).
|
||||
* Otherwise, the return value of the marked exception callback will be
|
||||
* subjected to the same checks.
|
||||
*
|
||||
* Note that throwing an exception with lingering resources (locks,
|
||||
* references, etc.) will lead to a verification error.
|
||||
*
|
||||
* Note that callbacks *cannot* call this helper.
|
||||
* Returns
|
||||
* Never.
|
||||
* Throws
|
||||
* An exception with the specified 'cookie' value.
|
||||
*/
|
||||
extern void bpf_throw(u64 cookie) __ksym;
|
||||
|
||||
/* This macro must be used to mark the exception callback corresponding to the
|
||||
* main program. For example:
|
||||
*
|
||||
* int exception_cb(u64 cookie) {
|
||||
* return cookie;
|
||||
* }
|
||||
*
|
||||
* SEC("tc")
|
||||
* __exception_cb(exception_cb)
|
||||
* int main_prog(struct __sk_buff *ctx) {
|
||||
* ...
|
||||
* return TC_ACT_OK;
|
||||
* }
|
||||
*
|
||||
* Here, exception callback for the main program will be 'exception_cb'. Note
|
||||
* that this attribute can only be used once, and multiple exception callbacks
|
||||
* specified for the main program will lead to verification error.
|
||||
*/
|
||||
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))
|
||||
|
||||
#define __bpf_assert_signed(x) _Generic((x), \
|
||||
unsigned long: 0, \
|
||||
unsigned long long: 0, \
|
||||
signed long: 1, \
|
||||
signed long long: 1 \
|
||||
)
|
||||
|
||||
#define __bpf_assert_check(LHS, op, RHS) \
|
||||
_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
|
||||
_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
|
||||
_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
|
||||
_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")
|
||||
|
||||
#define __bpf_assert(LHS, op, cons, RHS, VAL) \
|
||||
({ \
|
||||
(void)bpf_throw; \
|
||||
asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
|
||||
: : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
|
||||
})
|
||||
|
||||
#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
|
||||
({ \
|
||||
__bpf_assert_check(LHS, op, RHS); \
|
||||
if (__bpf_assert_signed(LHS) && !(supp_sign)) \
|
||||
__bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
|
||||
else \
|
||||
__bpf_assert(LHS, #op, cons, RHS, VAL); \
|
||||
})
|
||||
|
||||
#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
|
||||
({ \
|
||||
if (sizeof(typeof(RHS)) == 8) { \
|
||||
const typeof(RHS) rhs_var = (RHS); \
|
||||
__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
|
||||
} else { \
|
||||
__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
|
||||
} \
|
||||
})
|
||||
|
||||
/* Description
|
||||
* Assert that a conditional expression is true.
|
||||
* Returns
|
||||
* Void.
|
||||
* Throws
|
||||
* An exception with the value zero when the assertion fails.
|
||||
*/
|
||||
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);
|
||||
|
||||
/* Description
|
||||
* Assert that a conditional expression is true.
|
||||
* Returns
|
||||
* Void.
|
||||
* Throws
|
||||
* An exception with the specified value when the assertion fails.
|
||||
*/
|
||||
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

/* Description
 *	Assert that LHS is equal to RHS. This statement updates the known value
 *	of LHS during verification. Note that RHS must be a constant value, and
 *	must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_eq(LHS, RHS)						\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, ==, RHS, 0, true);			\
	})

/* Description
 *	Assert that LHS is equal to RHS. This statement updates the known value
 *	of LHS during verification. Note that RHS must be a constant value, and
 *	must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_eq_with(LHS, RHS, value)				\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, ==, RHS, value, true);		\
	})

/* Description
 *	Assert that LHS is less than RHS. This statement updates the known
 *	bounds of LHS during verification. Note that RHS must be a constant
 *	value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_lt(LHS, RHS)						\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, <, RHS, 0, false);			\
	})

/* Description
 *	Assert that LHS is less than RHS. This statement updates the known
 *	bounds of LHS during verification. Note that RHS must be a constant
 *	value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_lt_with(LHS, RHS, value)				\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, <, RHS, value, false);		\
	})

/* Description
 *	Assert that LHS is greater than RHS. This statement updates the known
 *	bounds of LHS during verification. Note that RHS must be a constant
 *	value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_gt(LHS, RHS)						\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >, RHS, 0, false);			\
	})

/* Description
 *	Assert that LHS is greater than RHS. This statement updates the known
 *	bounds of LHS during verification. Note that RHS must be a constant
 *	value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_gt_with(LHS, RHS, value)				\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >, RHS, value, false);		\
	})

/* Description
 *	Assert that LHS is less than or equal to RHS. This statement updates the
 *	known bounds of LHS during verification. Note that RHS must be a
 *	constant value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_le(LHS, RHS)						\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, <=, RHS, 0, false);		\
	})

/* Description
 *	Assert that LHS is less than or equal to RHS. This statement updates the
 *	known bounds of LHS during verification. Note that RHS must be a
 *	constant value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_le_with(LHS, RHS, value)				\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, <=, RHS, value, false);		\
	})

/* Description
 *	Assert that LHS is greater than or equal to RHS. This statement updates
 *	the known bounds of LHS during verification. Note that RHS must be a
 *	constant value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_ge(LHS, RHS)						\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, RHS, 0, false);		\
	})

/* Description
 *	Assert that LHS is greater than or equal to RHS. This statement updates
 *	the known bounds of LHS during verification. Note that RHS must be a
 *	constant value, and must fit within the data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_ge_with(LHS, RHS, value)				\
	({								\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, RHS, value, false);		\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END)					\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, 0, false);		\
		__bpf_assert_op(LHS, <=, END, 0, false);		\
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value)			\
	({								\
		_Static_assert(BEG <= END, "BEG must be <= END");	\
		barrier_var(LHS);					\
		__bpf_assert_op(LHS, >=, BEG, value, false);		\
		__bpf_assert_op(LHS, <=, END, value, false);		\
	})

#endif
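These macros let a BPF program replace hand-written bounds checks with verifier-visible assertions that throw on failure. A minimal usage sketch (hypothetical program, not part of this diff; it assumes the bpf_experimental.h above is on the include path):

// SPDX-License-Identifier: GPL-2.0
/* Hedged illustration only: program and section names are made up. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

SEC("tc")
int assert_demo(struct __sk_buff *ctx)
{
	u64 len = ctx->len;

	/* On success the verifier now knows 64 <= len <= 1500; on failure
	 * the program unwinds and returns 0 (bpf_assert_range throws 0).
	 */
	bpf_assert_range(len, 64, 1500);
	return len & 0xff;
}

char _license[] SEC("license") = "GPL";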
@ -7296,7 +7296,7 @@ static struct btf_dedup_test dedup_tests[] = {
			BTF_FUNC_PROTO_ENC(0, 2),			/* [3] */
				BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
				BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
			BTF_FUNC_ENC(NAME_NTH(4), 2),			/* [4] */
			BTF_FUNC_ENC(NAME_NTH(4), 3),			/* [4] */
			/* tag -> t */
			BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1),		/* [5] */
			BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1),		/* [6] */
@ -7317,7 +7317,7 @@ static struct btf_dedup_test dedup_tests[] = {
			BTF_FUNC_PROTO_ENC(0, 2),			/* [3] */
				BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1),
				BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1),
			BTF_FUNC_ENC(NAME_NTH(4), 2),			/* [4] */
			BTF_FUNC_ENC(NAME_NTH(4), 3),			/* [4] */
			BTF_DECL_TAG_ENC(NAME_NTH(5), 2, -1),		/* [5] */
			BTF_DECL_TAG_ENC(NAME_NTH(5), 4, -1),		/* [6] */
			BTF_DECL_TAG_ENC(NAME_NTH(5), 4, 1),		/* [7] */
408	tools/testing/selftests/bpf/prog_tests/exceptions.c	Normal file
@ -0,0 +1,408 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

#include "exceptions.skel.h"
#include "exceptions_ext.skel.h"
#include "exceptions_fail.skel.h"
#include "exceptions_assert.skel.h"

static char log_buf[1024 * 1024];

static void test_exceptions_failure(void)
{
	RUN_TESTS(exceptions_fail);
}

static void test_exceptions_success(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, ropts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct exceptions_ext *eskel = NULL;
	struct exceptions *skel;
	int ret;

	skel = exceptions__open();
	if (!ASSERT_OK_PTR(skel, "exceptions__open"))
		return;

	ret = exceptions__load(skel);
	if (!ASSERT_OK(ret, "exceptions__load"))
		goto done;

	if (!ASSERT_OK(bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table), &(int){0},
					   &(int){bpf_program__fd(skel->progs.exception_tail_call_target)}, BPF_ANY),
		       "bpf_map_update_elem jmp_table"))
		goto done;

#define RUN_SUCCESS(_prog, return_val)						  \
	if (!test__start_subtest(#_prog)) goto _prog##_##return_val;		  \
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs._prog), &ropts); \
	ASSERT_OK(ret, #_prog " prog run ret");					  \
	ASSERT_EQ(ropts.retval, return_val, #_prog " prog run retval");		  \
	_prog##_##return_val:

	RUN_SUCCESS(exception_throw_always_1, 64);
	RUN_SUCCESS(exception_throw_always_2, 32);
	RUN_SUCCESS(exception_throw_unwind_1, 16);
	RUN_SUCCESS(exception_throw_unwind_2, 32);
	RUN_SUCCESS(exception_throw_default, 0);
	RUN_SUCCESS(exception_throw_default_value, 5);
	RUN_SUCCESS(exception_tail_call, 24);
	RUN_SUCCESS(exception_ext, 0);
	RUN_SUCCESS(exception_ext_mod_cb_runtime, 35);
	RUN_SUCCESS(exception_throw_subprog, 1);
	RUN_SUCCESS(exception_assert_nz_gfunc, 1);
	RUN_SUCCESS(exception_assert_zero_gfunc, 1);
	RUN_SUCCESS(exception_assert_neg_gfunc, 1);
	RUN_SUCCESS(exception_assert_pos_gfunc, 1);
	RUN_SUCCESS(exception_assert_negeq_gfunc, 1);
	RUN_SUCCESS(exception_assert_poseq_gfunc, 1);
	RUN_SUCCESS(exception_assert_nz_gfunc_with, 1);
	RUN_SUCCESS(exception_assert_zero_gfunc_with, 1);
	RUN_SUCCESS(exception_assert_neg_gfunc_with, 1);
	RUN_SUCCESS(exception_assert_pos_gfunc_with, 1);
	RUN_SUCCESS(exception_assert_negeq_gfunc_with, 1);
	RUN_SUCCESS(exception_assert_poseq_gfunc_with, 1);
	RUN_SUCCESS(exception_bad_assert_nz_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_zero_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_neg_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_pos_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_negeq_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_poseq_gfunc, 0);
	RUN_SUCCESS(exception_bad_assert_nz_gfunc_with, 100);
	RUN_SUCCESS(exception_bad_assert_zero_gfunc_with, 105);
	RUN_SUCCESS(exception_bad_assert_neg_gfunc_with, 200);
	RUN_SUCCESS(exception_bad_assert_pos_gfunc_with, 0);
	RUN_SUCCESS(exception_bad_assert_negeq_gfunc_with, 101);
	RUN_SUCCESS(exception_bad_assert_poseq_gfunc_with, 99);
	RUN_SUCCESS(exception_assert_range, 1);
	RUN_SUCCESS(exception_assert_range_with, 1);
	RUN_SUCCESS(exception_bad_assert_range, 0);
	RUN_SUCCESS(exception_bad_assert_range_with, 10);

#define RUN_EXT(load_ret, attach_err, expr, msg, after_link)			  \
	{									  \
		LIBBPF_OPTS(bpf_object_open_opts, o, .kernel_log_buf = log_buf,	  \
			    .kernel_log_size = sizeof(log_buf),			  \
			    .kernel_log_level = 2);				  \
		exceptions_ext__destroy(eskel);					  \
		eskel = exceptions_ext__open_opts(&o);				  \
		struct bpf_program *prog = NULL;				  \
		struct bpf_link *link = NULL;					  \
		if (!ASSERT_OK_PTR(eskel, "exceptions_ext__open"))		  \
			goto done;						  \
		(expr);								  \
		ASSERT_OK_PTR(bpf_program__name(prog), bpf_program__name(prog));  \
		if (!ASSERT_EQ(exceptions_ext__load(eskel), load_ret,		  \
			       "exceptions_ext__load")) {			  \
			printf("%s\n", log_buf);				  \
			goto done;						  \
		}								  \
		if (load_ret != 0) {						  \
			printf("%s\n", log_buf);				  \
			if (!ASSERT_OK_PTR(strstr(log_buf, msg), "strstr"))	  \
				goto done;					  \
		}								  \
		if (!load_ret && attach_err) {					  \
			if (!ASSERT_ERR_PTR(link = bpf_program__attach(prog), "attach err")) \
				goto done;					  \
		} else if (!load_ret) {						  \
			if (!ASSERT_OK_PTR(link = bpf_program__attach(prog), "attach ok"))  \
				goto done;					  \
			(void)(after_link);					  \
			bpf_link__destroy(link);				  \
		}								  \
	}

	if (test__start_subtest("non-throwing fentry -> exception_cb"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.pfentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod"), "set_attach_target"))
				goto done;
		}), "FENTRY/FEXIT programs cannot attach to exception callback", 0);

	if (test__start_subtest("throwing fentry -> exception_cb"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.throwing_fentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod"), "set_attach_target"))
				goto done;
		}), "FENTRY/FEXIT programs cannot attach to exception callback", 0);

	if (test__start_subtest("non-throwing fexit -> exception_cb"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.pfexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod"), "set_attach_target"))
				goto done;
		}), "FENTRY/FEXIT programs cannot attach to exception callback", 0);

	if (test__start_subtest("throwing fexit -> exception_cb"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.throwing_fexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod"), "set_attach_target"))
				goto done;
		}), "FENTRY/FEXIT programs cannot attach to exception callback", 0);

	if (test__start_subtest("throwing extension (with custom cb) -> exception_cb"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.throwing_exception_cb_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod"), "set_attach_target"))
				goto done;
		}), "Extension programs cannot attach to exception callback", 0);

	if (test__start_subtest("throwing extension -> global func in exception_cb"))
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_exception_cb_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext_mod_cb_runtime),
				       "exception_cb_mod_global"), "set_attach_target"))
				goto done;
		}), "", ({ RUN_SUCCESS(exception_ext_mod_cb_runtime, 131); }));

	if (test__start_subtest("throwing extension (with custom cb) -> global func in exception_cb"))
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_ext),
				       "exception_ext_global"), "set_attach_target"))
				goto done;
		}), "", ({ RUN_SUCCESS(exception_ext, 128); }));

	if (test__start_subtest("non-throwing fentry -> non-throwing subprog"))
		/* non-throwing fentry -> non-throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.pfentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing fentry -> non-throwing subprog"))
		/* throwing fentry -> non-throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_fentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("non-throwing fentry -> throwing subprog"))
		/* non-throwing fentry -> throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.pfentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing fentry -> throwing subprog"))
		/* throwing fentry -> throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_fentry;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("non-throwing fexit -> non-throwing subprog"))
		/* non-throwing fexit -> non-throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.pfexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing fexit -> non-throwing subprog"))
		/* throwing fexit -> non-throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_fexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("non-throwing fexit -> throwing subprog"))
		/* non-throwing fexit -> throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.pfexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing fexit -> throwing subprog"))
		/* throwing fexit -> throwing subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_fexit;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	/* fmod_ret not allowed for subprog - Check so we remember to handle its
	 * throwing specification compatibility with target when supported.
	 */
	if (test__start_subtest("non-throwing fmod_ret -> non-throwing subprog"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.pfmod_ret;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "can't modify return codes of BPF program", 0);

	/* fmod_ret not allowed for subprog - Check so we remember to handle its
	 * throwing specification compatibility with target when supported.
	 */
	if (test__start_subtest("non-throwing fmod_ret -> non-throwing global subprog"))
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.pfmod_ret;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "global_subprog"), "set_attach_target"))
				goto done;
		}), "can't modify return codes of BPF program", 0);

	if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
		/* non-throwing extension -> non-throwing subprog : BAD (!global) */
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "subprog"), "set_attach_target"))
				goto done;
		}), "subprog() is not a global function", 0);

	if (test__start_subtest("non-throwing extension -> throwing subprog"))
		/* non-throwing extension -> throwing subprog : BAD (!global) */
		RUN_EXT(-EINVAL, true, ({
			prog = eskel->progs.extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_subprog"), "set_attach_target"))
				goto done;
		}), "throwing_subprog() is not a global function", 0);

	if (test__start_subtest("non-throwing extension -> non-throwing subprog"))
		/* non-throwing extension -> non-throwing global subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "global_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("non-throwing extension -> throwing global subprog"))
		/* non-throwing extension -> throwing global subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_global_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing extension -> throwing global subprog"))
		/* throwing extension -> throwing global subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "throwing_global_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing extension -> non-throwing global subprog"))
		/* throwing extension -> non-throwing global subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "global_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("non-throwing extension -> main subprog"))
		/* non-throwing extension -> main subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "exception_throw_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

	if (test__start_subtest("throwing extension -> main subprog"))
		/* throwing extension -> main subprog : OK */
		RUN_EXT(0, false, ({
			prog = eskel->progs.throwing_extension;
			bpf_program__set_autoload(prog, true);
			if (!ASSERT_OK(bpf_program__set_attach_target(prog,
				       bpf_program__fd(skel->progs.exception_throw_subprog),
				       "exception_throw_subprog"), "set_attach_target"))
				goto done;
		}), "", 0);

done:
	exceptions_ext__destroy(eskel);
	exceptions__destroy(skel);
}

static void test_exceptions_assertions(void)
{
	RUN_TESTS(exceptions_assert);
}

void test_exceptions(void)
{
	test_exceptions_success();
	test_exceptions_failure();
	test_exceptions_assertions();
}
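The pattern above relies on a thrown value becoming the program's return code once the stack is unwound. A stripped-down sketch of the BPF side (hypothetical, mirroring what RUN_SUCCESS(exception_throw_always_1, 64) exercises above):

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch, not part of this diff: the value passed to bpf_throw() is
 * what bpf_prog_test_run_opts() reports as retval when no custom exception
 * callback is registered.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

SEC("tc")
int throw_retval_demo(struct __sk_buff *ctx)
{
	if (ctx->len)
		bpf_throw(64);	/* unwinds; the program's retval becomes 64 */
	return 0;
}

char _license[] SEC("license") = "GPL";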
@ -308,7 +308,7 @@ void test_fill_link_info(void)
		return;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	kprobe_addr = ksym_get_addr(KPROBE_FUNC);
@ -4,6 +4,8 @@
 #include "trace_helpers.h"
 #include "bpf/libbpf_internal.h"

static struct ksyms *ksyms;

 static void kprobe_multi_testmod_check(struct kprobe_multi *skel)
 {
	ASSERT_EQ(skel->bss->kprobe_testmod_test1_result, 1, "kprobe_test1_result");
@ -50,12 +52,12 @@ static void test_testmod_attach_api_addrs(void)
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	unsigned long long addrs[3];

	addrs[0] = ksym_get_addr("bpf_testmod_fentry_test1");
	ASSERT_NEQ(addrs[0], 0, "ksym_get_addr");
	addrs[1] = ksym_get_addr("bpf_testmod_fentry_test2");
	ASSERT_NEQ(addrs[1], 0, "ksym_get_addr");
	addrs[2] = ksym_get_addr("bpf_testmod_fentry_test3");
	ASSERT_NEQ(addrs[2], 0, "ksym_get_addr");
	addrs[0] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test1");
	ASSERT_NEQ(addrs[0], 0, "ksym_get_addr_local");
	addrs[1] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test2");
	ASSERT_NEQ(addrs[1], 0, "ksym_get_addr_local");
	addrs[2] = ksym_get_addr_local(ksyms, "bpf_testmod_fentry_test3");
	ASSERT_NEQ(addrs[2], 0, "ksym_get_addr_local");

	opts.addrs = (const unsigned long *) addrs;
	opts.cnt = ARRAY_SIZE(addrs);
@ -79,11 +81,15 @@ static void test_testmod_attach_api_syms(void)

 void serial_test_kprobe_multi_testmod_test(void)
 {
	if (!ASSERT_OK(load_kallsyms_refresh(), "load_kallsyms_refresh"))
	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
		return;

	if (test__start_subtest("testmod_attach_api_syms"))
		test_testmod_attach_api_syms();

	if (test__start_subtest("testmod_attach_api_addrs"))
		test_testmod_attach_api_addrs();

	free_kallsyms_local(ksyms);
 }
@ -142,10 +142,14 @@ static void test_libbpf_bpf_map_type_str(void)
		/* Special case for map_type_name BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
		 * where it and BPF_MAP_TYPE_CGROUP_STORAGE have the same enum value
		 * (map_type). For this enum value, libbpf_bpf_map_type_str() picks
		 * BPF_MAP_TYPE_CGROUP_STORAGE.
		 * BPF_MAP_TYPE_CGROUP_STORAGE. The same for
		 * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
		 * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE.
		 */
		if (strcmp(map_type_name, "BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED") == 0)
			continue;
		if (strcmp(map_type_name, "BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED") == 0)
			continue;

		ASSERT_STREQ(buf, map_type_name, "exp_str_value");
	}
@ -65,8 +65,8 @@ static struct {
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
@ -61,6 +61,11 @@ void test_module_fentry_shadow(void)
	int link_fd[2] = {};
	__s32 btf_id[2] = {};

	if (!env.has_testmod) {
		test__skip();
		return;
	}

	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_FENTRY,
	);
125	tools/testing/selftests/bpf/prog_tests/percpu_alloc.c	Normal file
@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "percpu_alloc_array.skel.h"
#include "percpu_alloc_cgrp_local_storage.skel.h"
#include "percpu_alloc_fail.skel.h"

static void test_array(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_1, true);
	bpf_program__set_autoload(skel->progs.test_array_map_2, true);
	bpf_program__set_autoload(skel->progs.test_array_map_3, true);
	bpf_program__set_autoload(skel->progs.test_array_map_4, true);

	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_array_map_1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map 1-4");
	ASSERT_EQ(topts.retval, 0, "test_run array_map 1-4");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}

static void test_array_sleepable(void)
{
	struct percpu_alloc_array *skel;
	int err, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	skel = percpu_alloc_array__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc__open"))
		return;

	bpf_program__set_autoload(skel->progs.test_array_map_10, true);

	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_array__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
		goto out;

	err = percpu_alloc_array__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_array__attach"))
		goto out;

	prog_fd = bpf_program__fd(skel->progs.test_array_map_10);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run array_map_10");
	ASSERT_EQ(topts.retval, 0, "test_run array_map_10");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
out:
	percpu_alloc_array__destroy(skel);
}

static void test_cgrp_local_storage(void)
{
	struct percpu_alloc_cgrp_local_storage *skel;
	int err, cgroup_fd, prog_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	cgroup_fd = test__join_cgroup("/percpu_alloc");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
		return;

	skel = percpu_alloc_cgrp_local_storage__open();
	if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
		goto close_fd;

	skel->rodata->nr_cpus = libbpf_num_possible_cpus();

	err = percpu_alloc_cgrp_local_storage__load(skel);
	if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
		goto destroy_skel;

	err = percpu_alloc_cgrp_local_storage__attach(skel);
	if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
		goto destroy_skel;

	prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
	ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
	ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
	ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");

destroy_skel:
	percpu_alloc_cgrp_local_storage__destroy(skel);
close_fd:
	close(cgroup_fd);
}

static void test_failure(void)
{
	RUN_TESTS(percpu_alloc_fail);
}

void test_percpu_alloc(void)
{
	if (test__start_subtest("array"))
		test_array();
	if (test__start_subtest("array_sleepable"))
		test_array_sleepable();
	if (test__start_subtest("cgrp_local_storage"))
		test_cgrp_local_storage();
	if (test__start_subtest("failure_tests"))
		test_failure();
}
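For context, a condensed sketch of what a test_array_map_* style program does on the BPF side (hypothetical struct, field, and map names; it assumes the bpf_percpu_obj_new()/bpf_percpu_obj_drop() wrappers and the __percpu_kptr tag that this series introduces):

struct val_t {
	long b, c, d;		/* assumed layout, for illustration only */
};

struct elem {
	struct val_t __percpu_kptr *pc;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

SEC("?fentry/bpf_fentry_test1")
int BPF_PROG(percpu_alloc_demo)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int zero = 0;

	e = bpf_map_lookup_elem(&array, &zero);
	if (!e)
		return 0;

	/* allocate a per-CPU object and publish it in the map value */
	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;

	p = bpf_kptr_xchg(&e->pc, p);
	if (p)		/* an old object was already installed; free it */
		bpf_percpu_obj_drop(p);
	return 0;
}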
89	tools/testing/selftests/bpf/prog_tests/preempted_bpf_ma_op.c	Normal file
@ -0,0 +1,89 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <stdbool.h>
#include <test_progs.h>

#include "preempted_bpf_ma_op.skel.h"

#define ALLOC_THREAD_NR 4
#define ALLOC_LOOP_NR 512

struct alloc_ctx {
	/* output */
	int run_err;
	/* input */
	int fd;
	bool *nomem_err;
};

static void *run_alloc_prog(void *data)
{
	struct alloc_ctx *ctx = data;
	cpu_set_t cpu_set;
	int i;

	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);

	for (i = 0; i < ALLOC_LOOP_NR && !*ctx->nomem_err; i++) {
		LIBBPF_OPTS(bpf_test_run_opts, topts);
		int err;

		err = bpf_prog_test_run_opts(ctx->fd, &topts);
		ctx->run_err |= err | topts.retval;
	}

	return NULL;
}

void test_preempted_bpf_ma_op(void)
{
	struct alloc_ctx ctx[ALLOC_THREAD_NR];
	struct preempted_bpf_ma_op *skel;
	pthread_t tid[ALLOC_THREAD_NR];
	int i, err;

	skel = preempted_bpf_ma_op__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	err = preempted_bpf_ma_op__attach(skel);
	if (!ASSERT_OK(err, "attach"))
		goto out;

	for (i = 0; i < ARRAY_SIZE(ctx); i++) {
		struct bpf_program *prog;
		char name[8];

		snprintf(name, sizeof(name), "test%d", i);
		prog = bpf_object__find_program_by_name(skel->obj, name);
		if (!ASSERT_OK_PTR(prog, "no test prog"))
			goto out;

		ctx[i].run_err = 0;
		ctx[i].fd = bpf_program__fd(prog);
		ctx[i].nomem_err = &skel->bss->nomem_err;
	}

	memset(tid, 0, sizeof(tid));
	for (i = 0; i < ARRAY_SIZE(tid); i++) {
		err = pthread_create(&tid[i], NULL, run_alloc_prog, &ctx[i]);
		if (!ASSERT_OK(err, "pthread_create"))
			break;
	}

	for (i = 0; i < ARRAY_SIZE(tid); i++) {
		if (!tid[i])
			break;
		pthread_join(tid[i], NULL);
		ASSERT_EQ(ctx[i].run_err, 0, "run prog err");
	}

	ASSERT_FALSE(skel->bss->nomem_err, "ENOMEM");
out:
	preempted_bpf_ma_op__destroy(skel);
}
@ -218,12 +218,14 @@ static void test_tailcall_2(void)
	bpf_object__close(obj);
 }

static void test_tailcall_count(const char *which)
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
 {
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
@ -265,23 +267,105 @@ static void test_tailcall_count(const char *which)
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		return;
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(map_fd < 0))
		return;
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
@ -291,6 +375,10 @@ static void test_tailcall_count(const char *which)
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
 }

@ -299,7 +387,7 @@ static void test_tailcall_count(const char *which)
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
@ -307,7 +395,7 @@ static void test_tailcall_3(void)
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects indirect jump
@ -352,11 +440,11 @@ static void test_tailcall_4(void)

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		return;
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(map_fd < 0))
		return;
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@ -442,11 +530,11 @@ static void test_tailcall_5(void)

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		return;
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(map_fd < 0))
		return;
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
@ -631,11 +719,11 @@ static void test_tailcall_bpf2bpf_2(void)

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		return;
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(map_fd < 0))
		return;
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
@ -805,11 +893,11 @@ static void test_tailcall_bpf2bpf_4(bool noise)

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		return;
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(map_fd < 0))
		return;
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
@ -872,7 +960,7 @@ static void test_tailcall_bpf2bpf_6(void)
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(map_fd, 0, "bss map fd"))
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
@ -884,6 +972,139 @@ static void test_tailcall_bpf2bpf_6(void)
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches with expectations when tailcall is preceded with
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches with expectations when tailcall is preceded
 * with bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
@ -910,4 +1131,12 @@ void test_tailcalls(void)
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
}
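For reference, a condensed sketch of the shape of the tailcall_bpf2bpf2-style object these tests load (reconstructed for illustration, not the actual file): the tail call happens inside a bpf2bpf subprog, and the counters checked above reach 33, matching the kernel's MAX_TAIL_CALL_CNT, even when a trampoline is attached to the traced subprog.

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

int count = 0;			/* lands in the tailcall.bss map read above */

static __noinline int subprog_tail(struct __sk_buff *skb)
{
	/* re-enters the entry prog installed at slot 0 by the test */
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

SEC("tc")
int classifier_0(struct __sk_buff *skb)
{
	count++;
	return subprog_tail(skb);	/* fentry/fexit attach to subprog_tail */
}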
368	tools/testing/selftests/bpf/progs/exceptions.c	Normal file
@ -0,0 +1,368 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

#ifndef ETH_P_IP
#define ETH_P_IP 0x0800
#endif

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int static_func(u64 i)
{
	bpf_throw(32);
	return i;
}

__noinline int global2static_simple(u64 i)
{
	static_func(i + 2);
	return i - 1;
}

__noinline int global2static(u64 i)
{
	if (i == ETH_P_IP)
		bpf_throw(16);
	return static_func(i);
}

static __noinline int static2global(u64 i)
{
	return global2static(i) + i;
}

SEC("tc")
int exception_throw_always_1(struct __sk_buff *ctx)
{
	bpf_throw(64);
	return 0;
}

/* In this case, the global func will never be seen executing after call to
 * static subprog, hence verifier will DCE the remaining instructions. Ensure we
 * are resilient to that.
 */
SEC("tc")
int exception_throw_always_2(struct __sk_buff *ctx)
{
	return global2static_simple(ctx->protocol);
}

SEC("tc")
int exception_throw_unwind_1(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol));
}

SEC("tc")
int exception_throw_unwind_2(struct __sk_buff *ctx)
{
	return static2global(bpf_ntohs(ctx->protocol) - 1);
}

SEC("tc")
int exception_throw_default(struct __sk_buff *ctx)
{
	bpf_throw(0);
	return 1;
}

SEC("tc")
int exception_throw_default_value(struct __sk_buff *ctx)
{
	bpf_throw(5);
	return 1;
}

SEC("tc")
int exception_tail_call_target(struct __sk_buff *ctx)
{
	bpf_throw(16);
	return 0;
}

static __noinline
int exception_tail_call_subprog(struct __sk_buff *ctx)
{
	volatile int ret = 10;

	bpf_tail_call_static(ctx, &jmp_table, 0);
	return ret;
}

SEC("tc")
int exception_tail_call(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	ret = exception_tail_call_subprog(ctx);
	return ret + 8;
}

__noinline int exception_ext_global(struct __sk_buff *ctx)
{
	volatile int ret = 0;

	return ret;
}

static __noinline int exception_ext_static(struct __sk_buff *ctx)
{
	return exception_ext_global(ctx);
}

SEC("tc")
int exception_ext(struct __sk_buff *ctx)
{
	return exception_ext_static(ctx);
}

__noinline int exception_cb_mod_global(u64 cookie)
{
	volatile int ret = 0;

	return ret;
}

/* Example of how the exception callback supplied during verification can still
 * introduce extensions by calling to dummy global functions, and alter runtime
 * behavior.
 *
 * Right now we don't allow freplace attachment to exception callback itself,
 * but if the need arises this restriction is technically feasible to relax in
 * the future.
 */
__noinline int exception_cb_mod(u64 cookie)
{
	return exception_cb_mod_global(cookie) + cookie + 10;
}

SEC("tc")
__exception_cb(exception_cb_mod)
int exception_ext_mod_cb_runtime(struct __sk_buff *ctx)
{
	bpf_throw(25);
	return 0;
}

__noinline static int subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

__noinline static int throwing_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}

__noinline int global_subprog(struct __sk_buff *ctx)
{
	return bpf_ktime_get_ns();
}

__noinline int throwing_global_subprog(struct __sk_buff *ctx)
{
	if (ctx->tstamp)
		bpf_throw(0);
	return bpf_ktime_get_ns();
}

SEC("tc")
int exception_throw_subprog(struct __sk_buff *ctx)
{
	switch (ctx->protocol) {
	case 1:
		return subprog(ctx);
	case 2:
		return global_subprog(ctx);
	case 3:
		return throwing_subprog(ctx);
	case 4:
		return throwing_global_subprog(ctx);
	default:
		break;
	}
	bpf_throw(1);
	return 0;
}

__noinline int assert_nz_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert(cookie != 0);
	return 0;
}

__noinline int assert_zero_gfunc(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_eq(cookie, 0);
	return 0;
}

__noinline int assert_neg_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_lt(cookie, 0);
	return 0;
}

__noinline int assert_pos_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_gt(cookie, 0);
	return 0;
}

__noinline int assert_negeq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_le(cookie, -1);
	return 0;
}

__noinline int assert_poseq_gfunc(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_ge(cookie, 1);
	return 0;
}

__noinline int assert_nz_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_with(cookie != 0, cookie + 100);
	return 0;
}

__noinline int assert_zero_gfunc_with(u64 c)
{
	volatile u64 cookie = c;

	bpf_assert_eq_with(cookie, 0, cookie + 100);
	return 0;
}

__noinline int assert_neg_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_lt_with(cookie, 0, cookie + 100);
	return 0;
}

__noinline int assert_pos_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_gt_with(cookie, 0, cookie + 100);
	return 0;
}

__noinline int assert_negeq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_le_with(cookie, -1, cookie + 100);
	return 0;
}

__noinline int assert_poseq_gfunc_with(s64 c)
{
	volatile s64 cookie = c;

	bpf_assert_ge_with(cookie, 1, cookie + 100);
	return 0;
}

#define check_assert(name, cookie, tag)				\
	SEC("tc")						\
	int exception##tag##name(struct __sk_buff *ctx)		\
	{							\
		return name(cookie) + 1;			\
	}

check_assert(assert_nz_gfunc, 5, _);
check_assert(assert_zero_gfunc, 0, _);
check_assert(assert_neg_gfunc, -100, _);
check_assert(assert_pos_gfunc, 100, _);
check_assert(assert_negeq_gfunc, -1, _);
check_assert(assert_poseq_gfunc, 1, _);

check_assert(assert_nz_gfunc_with, 5, _);
check_assert(assert_zero_gfunc_with, 0, _);
check_assert(assert_neg_gfunc_with, -100, _);
check_assert(assert_pos_gfunc_with, 100, _);
check_assert(assert_negeq_gfunc_with, -1, _);
check_assert(assert_poseq_gfunc_with, 1, _);

check_assert(assert_nz_gfunc, 0, _bad_);
check_assert(assert_zero_gfunc, 5, _bad_);
check_assert(assert_neg_gfunc, 100, _bad_);
check_assert(assert_pos_gfunc, -100, _bad_);
check_assert(assert_negeq_gfunc, 1, _bad_);
check_assert(assert_poseq_gfunc, -1, _bad_);

check_assert(assert_nz_gfunc_with, 0, _bad_);
check_assert(assert_zero_gfunc_with, 5, _bad_);
check_assert(assert_neg_gfunc_with, 100, _bad_);
check_assert(assert_pos_gfunc_with, -100, _bad_);
check_assert(assert_negeq_gfunc_with, 1, _bad_);
check_assert(assert_poseq_gfunc_with, -1, _bad_);

SEC("tc")
int exception_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, 0, ~0ULL);
	return 1;
}

SEC("tc")
int exception_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, 0, ~0ULL, 10);
	return 1;
}

SEC("tc")
int exception_bad_assert_range(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range(time, -100, 100);
	return 1;
}

SEC("tc")
int exception_bad_assert_range_with(struct __sk_buff *ctx)
{
	u64 time = bpf_ktime_get_ns();

	bpf_assert_range_with(time, -1000, 1000, 10);
	return 1;
}

char _license[] SEC("license") = "GPL";
135	tools/testing/selftests/bpf/progs/exceptions_assert.c	Normal file
@ -0,0 +1,135 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <limits.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

#define check_assert(type, op, name, value)			\
	SEC("?tc")						\
	__log_level(2) __failure				\
	int check_assert_##op##_##name(void *ctx)		\
	{							\
		type num = bpf_ktime_get_ns();			\
		bpf_assert_##op(num, value);			\
		return *(u64 *)num;				\
	}

__msg(": R0_w=-2147483648 R10=fp0")
check_assert(s64, eq, int_min, INT_MIN);
__msg(": R0_w=2147483647 R10=fp0")
check_assert(s64, eq, int_max, INT_MAX);
__msg(": R0_w=0 R10=fp0")
check_assert(s64, eq, zero, 0);
__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0")
check_assert(s64, eq, llong_min, LLONG_MIN);
__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0")
check_assert(s64, eq, llong_max, LLONG_MAX);

__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
check_assert(s64, lt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, neg, INT_MIN);

__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
check_assert(s64, le, pos, INT_MAX);
__msg(": R0_w=scalar(smax=0) R10=fp0")
check_assert(s64, le, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, le, neg, INT_MIN);

__msg(": R0_w=scalar(umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, zero, 0);
__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
check_assert(s64, gt, neg, INT_MIN);

__msg(": R0_w=scalar(umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, ge, pos, INT_MAX);
__msg(": R0_w=scalar(umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
check_assert(s64, ge, zero, 0);
__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
check_assert(s64, ge, neg, INT_MIN);

SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=-2147483646,smax=2147483645) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
	struct bpf_sock *sk = ctx->sk;
	s64 num;

	_Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
	if (!sk)
		return 0;
	num = sk->rx_queue_mapping;
	bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2);
	return *((u8 *)ctx + num);
}

SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=scalar(umin=4096,umax=8192,var_off=(0x0; 0x3fff))")
int check_assert_range_u64(struct __sk_buff *ctx)
{
	u64 num = ctx->len;

	bpf_assert_range(num, 4096, 8192);
	return *((u8 *)ctx + num);
}

SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
int check_assert_single_range_s64(struct __sk_buff *ctx)
{
	struct bpf_sock *sk = ctx->sk;
	s64 num;

	_Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match");
	if (!sk)
		return 0;
	num = sk->rx_queue_mapping;

	bpf_assert_range(num, 4096, 4096);
	return *((u8 *)ctx + num);
}

SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0")
int check_assert_single_range_u64(struct __sk_buff *ctx)
{
	u64 num = ctx->len;

	bpf_assert_range(num, 4096, 4096);
	return *((u8 *)ctx + num);
}

SEC("?tc")
__log_level(2) __failure
__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0")
int check_assert_generic(struct __sk_buff *ctx)
|
||||
{
|
||||
u8 *data_end = (void *)(long)ctx->data_end;
|
||||
u8 *data = (void *)(long)ctx->data;
|
||||
|
||||
bpf_assert(data + 64 <= data_end);
|
||||
return data[128];
|
||||
}
|
||||
|
||||
SEC("?fentry/bpf_check")
|
||||
__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
|
||||
int check_assert_with_return(void *ctx)
|
||||
{
|
||||
bpf_assert_with(!ctx, 64);
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
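The __msg() annotations above pin the exact scalar bounds the verifier learns from each assertion. As a hedged sketch (lo and hi are placeholders, not names from the header), bpf_assert_range(num, lo, hi) behaves like an open-coded guard that throws instead of branching to an error path:

	/* After this guard the verifier tracks num within [lo, hi]. */
	if (num < lo || num > hi)
		bpf_throw(0);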
tools/testing/selftests/bpf/progs/exceptions_ext.c (new file, 72 lines)
@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

SEC("?fentry")
int pfentry(void *ctx)
{
	return 0;
}

SEC("?fentry")
int throwing_fentry(void *ctx)
{
	bpf_throw(0);
	return 0;
}

__noinline int exception_cb(u64 cookie)
{
	return cookie + 64;
}

SEC("?freplace")
int extension(struct __sk_buff *ctx)
{
	return 0;
}

SEC("?freplace")
__exception_cb(exception_cb)
int throwing_exception_cb_extension(u64 cookie)
{
	bpf_throw(32);
	return 0;
}

SEC("?freplace")
__exception_cb(exception_cb)
int throwing_extension(struct __sk_buff *ctx)
{
	bpf_throw(64);
	return 0;
}

SEC("?fexit")
int pfexit(void *ctx)
{
	return 0;
}

SEC("?fexit")
int throwing_fexit(void *ctx)
{
	bpf_throw(0);
	return 0;
}

SEC("?fmod_ret")
int pfmod_ret(void *ctx)
{
	return 0;
}

SEC("?fmod_ret")
int throwing_fmod_ret(void *ctx)
{
	bpf_throw(0);
	return 0;
}

char _license[] SEC("license") = "GPL";
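The ?freplace programs above need an attach target before loading. A hypothetical userspace sketch using standard libbpf APIs (tgt_prog_fd and "global_subprog" are assumptions, not part of this patch):

	struct bpf_program *ext;

	ext = bpf_object__find_program_by_name(obj, "throwing_extension");
	bpf_program__set_autoload(ext, true);
	bpf_program__set_attach_target(ext, tgt_prog_fd, "global_subprog");
	/* then bpf_object__load(obj) and bpf_program__attach_freplace() */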
tools/testing/selftests/bpf/progs/exceptions_fail.c (new file, 347 lines)
@ -0,0 +1,347 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
#include "bpf_misc.h"
|
||||
#include "bpf_experimental.h"
|
||||
|
||||
extern void bpf_rcu_read_lock(void) __ksym;
|
||||
|
||||
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
|
||||
|
||||
struct foo {
|
||||
struct bpf_rb_node node;
|
||||
};
|
||||
|
||||
struct hmap_elem {
|
||||
struct bpf_timer timer;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 64);
|
||||
__type(key, int);
|
||||
__type(value, struct hmap_elem);
|
||||
} hmap SEC(".maps");
|
||||
|
||||
private(A) struct bpf_spin_lock lock;
|
||||
private(A) struct bpf_rb_root rbtree __contains(foo, node);
|
||||
|
||||
__noinline void *exception_cb_bad_ret_type(u64 cookie)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
__noinline int exception_cb_bad_arg_0(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline int exception_cb_bad_arg_2(int a, int b)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline int exception_cb_ok_arg_small(int a)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb_bad_ret_type)
|
||||
__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.")
|
||||
int reject_exception_cb_type_1(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb_bad_arg_0)
|
||||
__failure __msg("exception cb only supports single integer argument")
|
||||
int reject_exception_cb_type_2(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb_bad_arg_2)
|
||||
__failure __msg("exception cb only supports single integer argument")
|
||||
int reject_exception_cb_type_3(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb_ok_arg_small)
|
||||
__success
|
||||
int reject_exception_cb_type_4(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline
|
||||
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("cannot be called from callback subprog")
|
||||
int reject_async_callback_throw(struct __sk_buff *ctx)
|
||||
{
|
||||
struct hmap_elem *elem;
|
||||
|
||||
elem = bpf_map_lookup_elem(&hmap, &(int){0});
|
||||
if (!elem)
|
||||
return 0;
|
||||
return bpf_timer_set_callback(&elem->timer, timer_cb);
|
||||
}
|
||||
|
||||
__noinline static int subprog_lock(struct __sk_buff *ctx)
|
||||
{
|
||||
volatile int ret = 0;
|
||||
|
||||
bpf_spin_lock(&lock);
|
||||
if (ctx->len)
|
||||
bpf_throw(0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("function calls are not allowed while holding a lock")
|
||||
int reject_with_lock(void *ctx)
|
||||
{
|
||||
bpf_spin_lock(&lock);
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("function calls are not allowed while holding a lock")
|
||||
int reject_subprog_with_lock(void *ctx)
|
||||
{
|
||||
return subprog_lock(ctx);
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("bpf_rcu_read_unlock is missing")
|
||||
int reject_with_rcu_read_lock(void *ctx)
|
||||
{
|
||||
bpf_rcu_read_lock();
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline static int throwing_subprog(struct __sk_buff *ctx)
|
||||
{
|
||||
if (ctx->len)
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("bpf_rcu_read_unlock is missing")
|
||||
int reject_subprog_with_rcu_read_lock(void *ctx)
|
||||
{
|
||||
bpf_rcu_read_lock();
|
||||
return throwing_subprog(ctx);
|
||||
}
|
||||
|
||||
static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("function calls are not allowed while holding a lock")
|
||||
int reject_with_rbtree_add_throw(void *ctx)
|
||||
{
|
||||
struct foo *f;
|
||||
|
||||
f = bpf_obj_new(typeof(*f));
|
||||
if (!f)
|
||||
return 0;
|
||||
bpf_spin_lock(&lock);
|
||||
bpf_rbtree_add(&rbtree, &f->node, rbless);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("Unreleased reference")
|
||||
int reject_with_reference(void *ctx)
|
||||
{
|
||||
struct foo *f;
|
||||
|
||||
f = bpf_obj_new(typeof(*f));
|
||||
if (!f)
|
||||
return 0;
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline static int subprog_ref(struct __sk_buff *ctx)
|
||||
{
|
||||
struct foo *f;
|
||||
|
||||
f = bpf_obj_new(typeof(*f));
|
||||
if (!f)
|
||||
return 0;
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline static int subprog_cb_ref(u32 i, void *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("Unreleased reference")
|
||||
int reject_with_cb_reference(void *ctx)
|
||||
{
|
||||
struct foo *f;
|
||||
|
||||
f = bpf_obj_new(typeof(*f));
|
||||
if (!f)
|
||||
return 0;
|
||||
bpf_loop(5, subprog_cb_ref, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("cannot be called from callback")
|
||||
int reject_with_cb(void *ctx)
|
||||
{
|
||||
bpf_loop(5, subprog_cb_ref, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("Unreleased reference")
|
||||
int reject_with_subprog_reference(void *ctx)
|
||||
{
|
||||
return subprog_ref(ctx) + 1;
|
||||
}
|
||||
|
||||
__noinline int throwing_exception_cb(u64 c)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return c;
|
||||
}
|
||||
|
||||
__noinline int exception_cb1(u64 c)
|
||||
{
|
||||
return c;
|
||||
}
|
||||
|
||||
__noinline int exception_cb2(u64 c)
|
||||
{
|
||||
return c;
|
||||
}
|
||||
|
||||
static __noinline int static_func(struct __sk_buff *ctx)
|
||||
{
|
||||
return exception_cb1(ctx->tstamp);
|
||||
}
|
||||
|
||||
__noinline int global_func(struct __sk_buff *ctx)
|
||||
{
|
||||
return exception_cb1(ctx->tstamp);
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(throwing_exception_cb)
|
||||
__failure __msg("cannot be called from callback subprog")
|
||||
int reject_throwing_exception_cb(struct __sk_buff *ctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb1)
|
||||
__failure __msg("cannot call exception cb directly")
|
||||
int reject_exception_cb_call_global_func(struct __sk_buff *ctx)
|
||||
{
|
||||
return global_func(ctx);
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb1)
|
||||
__failure __msg("cannot call exception cb directly")
|
||||
int reject_exception_cb_call_static_func(struct __sk_buff *ctx)
|
||||
{
|
||||
return static_func(ctx);
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__exception_cb(exception_cb1)
|
||||
__exception_cb(exception_cb2)
|
||||
__failure __msg("multiple exception callback tags for main subprog")
|
||||
int reject_multiple_exception_cb(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 16;
|
||||
}
|
||||
|
||||
__noinline int exception_cb_bad_ret(u64 c)
|
||||
{
|
||||
return c;
|
||||
}
|
||||
|
||||
SEC("?fentry/bpf_check")
|
||||
__exception_cb(exception_cb_bad_ret)
|
||||
__failure __msg("At program exit the register R0 has unknown scalar value should")
|
||||
int reject_set_exception_cb_bad_ret1(void *ctx)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry/bpf_check")
|
||||
__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
|
||||
int reject_set_exception_cb_bad_ret2(void *ctx)
|
||||
{
|
||||
bpf_throw(64);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline static int loop_cb1(u32 index, int *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__noinline static int loop_cb2(u32 index, int *ctx)
|
||||
{
|
||||
bpf_throw(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("cannot be called from callback")
|
||||
int reject_exception_throw_cb(struct __sk_buff *ctx)
|
||||
{
|
||||
bpf_loop(5, loop_cb1, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?tc")
|
||||
__failure __msg("cannot be called from callback")
|
||||
int reject_exception_throw_cb_diff(struct __sk_buff *ctx)
|
||||
{
|
||||
if (ctx->protocol)
|
||||
bpf_loop(5, loop_cb1, NULL, 0);
|
||||
else
|
||||
bpf_loop(5, loop_cb2, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
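The lock tests above reject bpf_throw() while a bpf_spin_lock is held, because the throw is a function call. A sketch of the pattern they push programs toward (bad_condition is a placeholder): release the lock first, then throw.

	bpf_spin_lock(&lock);
	if (bad_condition) {
		bpf_spin_unlock(&lock);
		bpf_throw(0);
	}
	bpf_spin_unlock(&lock);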
tools/testing/selftests/bpf/progs/percpu_alloc_array.c (new file, 183 lines)
@ -0,0 +1,183 @@
|
||||
#include "bpf_experimental.h"
|
||||
|
||||
struct val_t {
|
||||
long b, c, d;
|
||||
};
|
||||
|
||||
struct elem {
|
||||
long sum;
|
||||
struct val_t __percpu_kptr *pc;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, int);
|
||||
__type(value, struct elem);
|
||||
} array SEC(".maps");
|
||||
|
||||
void bpf_rcu_read_lock(void) __ksym;
|
||||
void bpf_rcu_read_unlock(void) __ksym;
|
||||
|
||||
const volatile int nr_cpus;
|
||||
|
||||
/* Initialize the percpu object */
|
||||
SEC("?fentry/bpf_fentry_test1")
|
||||
int BPF_PROG(test_array_map_1)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
p = bpf_kptr_xchg(&e->pc, p);
|
||||
if (p)
|
||||
bpf_percpu_obj_drop(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Update percpu data */
|
||||
SEC("?fentry/bpf_fentry_test2")
|
||||
int BPF_PROG(test_array_map_2)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = e->pc;
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
v = bpf_per_cpu_ptr(p, 0);
|
||||
if (!v)
|
||||
return 0;
|
||||
v->c = 1;
|
||||
v->d = 2;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cpu0_field_d, sum_field_c;
|
||||
|
||||
/* Summarize percpu data */
|
||||
SEC("?fentry/bpf_fentry_test3")
|
||||
int BPF_PROG(test_array_map_3)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
int i, index = 0;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = e->pc;
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_for(i, 0, nr_cpus) {
|
||||
v = bpf_per_cpu_ptr(p, i);
|
||||
if (v) {
|
||||
if (i == 0)
|
||||
cpu0_field_d = v->d;
|
||||
sum_field_c += v->c;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Explicitly free allocated percpu data */
|
||||
SEC("?fentry/bpf_fentry_test4")
|
||||
int BPF_PROG(test_array_map_4)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
/* delete */
|
||||
p = bpf_kptr_xchg(&e->pc, NULL);
|
||||
if (p) {
|
||||
bpf_percpu_obj_drop(p);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
int BPF_PROG(test_array_map_10)
|
||||
{
|
||||
struct val_t __percpu_kptr *p, *p1;
|
||||
int i, index = 0;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
bpf_rcu_read_lock();
|
||||
p = e->pc;
|
||||
if (!p) {
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
goto out;
|
||||
|
||||
p1 = bpf_kptr_xchg(&e->pc, p);
|
||||
if (p1) {
|
||||
/* race condition */
|
||||
bpf_percpu_obj_drop(p1);
|
||||
}
|
||||
}
|
||||
|
||||
v = bpf_this_cpu_ptr(p);
|
||||
v->c = 3;
|
||||
v = bpf_this_cpu_ptr(p);
|
||||
v->c = 0;
|
||||
|
||||
v = bpf_per_cpu_ptr(p, 0);
|
||||
if (!v)
|
||||
goto out;
|
||||
v->c = 1;
|
||||
v->d = 2;
|
||||
|
||||
/* delete */
|
||||
p1 = bpf_kptr_xchg(&e->pc, NULL);
|
||||
if (!p1)
|
||||
goto out;
|
||||
|
||||
bpf_for(i, 0, nr_cpus) {
|
||||
v = bpf_per_cpu_ptr(p, i);
|
||||
if (v) {
|
||||
if (i == 0)
|
||||
cpu0_field_d = v->d;
|
||||
sum_field_c += v->c;
|
||||
}
|
||||
}
|
||||
|
||||
/* finally release p */
|
||||
bpf_percpu_obj_drop(p1);
|
||||
out:
|
||||
bpf_rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
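The const volatile nr_cpus above must be filled in by the loader before the object is loaded. A sketch assuming the usual skeleton naming for this object (the skeleton header itself is generated, not part of the diff):

	#include "percpu_alloc_array.skel.h"

	struct percpu_alloc_array *skel = percpu_alloc_array__open();

	if (skel) {
		skel->rodata->nr_cpus = libbpf_num_possible_cpus();
		percpu_alloc_array__load(skel);
	}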
|
@ -0,0 +1,105 @@
|
||||
#include "bpf_experimental.h"
|
||||
|
||||
struct val_t {
|
||||
long b, c, d;
|
||||
};
|
||||
|
||||
struct elem {
|
||||
long sum;
|
||||
struct val_t __percpu_kptr *pc;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
|
||||
__uint(map_flags, BPF_F_NO_PREALLOC);
|
||||
__type(key, int);
|
||||
__type(value, struct elem);
|
||||
} cgrp SEC(".maps");
|
||||
|
||||
const volatile int nr_cpus;
|
||||
|
||||
/* Initialize the percpu object */
|
||||
SEC("fentry/bpf_fentry_test1")
|
||||
int BPF_PROG(test_cgrp_local_storage_1)
|
||||
{
|
||||
struct task_struct *task;
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct elem *e;
|
||||
|
||||
task = bpf_get_current_task_btf();
|
||||
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
|
||||
BPF_LOCAL_STORAGE_GET_F_CREATE);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
p = bpf_kptr_xchg(&e->pc, p);
|
||||
if (p)
|
||||
bpf_percpu_obj_drop(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Percpu data collection */
|
||||
SEC("fentry/bpf_fentry_test2")
|
||||
int BPF_PROG(test_cgrp_local_storage_2)
|
||||
{
|
||||
struct task_struct *task;
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
|
||||
task = bpf_get_current_task_btf();
|
||||
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = e->pc;
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
v = bpf_per_cpu_ptr(p, 0);
|
||||
if (!v)
|
||||
return 0;
|
||||
v->c = 1;
|
||||
v->d = 2;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cpu0_field_d, sum_field_c;
|
||||
|
||||
/* Summarize percpu data collection */
|
||||
SEC("fentry/bpf_fentry_test3")
|
||||
int BPF_PROG(test_cgrp_local_storage_3)
|
||||
{
|
||||
struct task_struct *task;
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
int i;
|
||||
|
||||
task = bpf_get_current_task_btf();
|
||||
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = e->pc;
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_for(i, 0, nr_cpus) {
|
||||
v = bpf_per_cpu_ptr(p, i);
|
||||
if (v) {
|
||||
if (i == 0)
|
||||
cpu0_field_d = v->d;
|
||||
sum_field_c += v->c;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
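Note the flag difference between the two lookups above; a short sketch of what it means, grounded in the helper's semantics:

	/* With BPF_LOCAL_STORAGE_GET_F_CREATE a zeroed element is created on
	 * demand; with flags == 0 the lookup returns NULL if nothing has been
	 * stored for this cgroup yet.
	 */
	e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
	if (!e)
		return 0;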
tools/testing/selftests/bpf/progs/percpu_alloc_fail.c (new file, 164 lines)
@ -0,0 +1,164 @@
|
||||
#include "bpf_experimental.h"
|
||||
#include "bpf_misc.h"
|
||||
|
||||
struct val_t {
|
||||
long b, c, d;
|
||||
};
|
||||
|
||||
struct val2_t {
|
||||
long b;
|
||||
};
|
||||
|
||||
struct val_with_ptr_t {
|
||||
char *p;
|
||||
};
|
||||
|
||||
struct val_with_rb_root_t {
|
||||
struct bpf_spin_lock lock;
|
||||
};
|
||||
|
||||
struct elem {
|
||||
long sum;
|
||||
struct val_t __percpu_kptr *pc;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, int);
|
||||
__type(value, struct elem);
|
||||
} array SEC(".maps");
|
||||
|
||||
long ret;
|
||||
|
||||
SEC("?fentry/bpf_fentry_test1")
|
||||
__failure __msg("store to referenced kptr disallowed")
|
||||
int BPF_PROG(test_array_map_1)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
p = bpf_kptr_xchg(&e->pc, p);
|
||||
if (p)
|
||||
bpf_percpu_obj_drop(p);
|
||||
|
||||
e->pc = (struct val_t __percpu_kptr *)ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry/bpf_fentry_test1")
|
||||
__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t")
|
||||
int BPF_PROG(test_array_map_2)
|
||||
{
|
||||
struct val2_t __percpu_kptr *p2;
|
||||
struct val_t __percpu_kptr *p;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p2 = bpf_percpu_obj_new(struct val2_t);
|
||||
if (!p2)
|
||||
return 0;
|
||||
|
||||
p = bpf_kptr_xchg(&e->pc, p2);
|
||||
if (p)
|
||||
bpf_percpu_obj_drop(p);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_")
|
||||
int BPF_PROG(test_array_map_3)
|
||||
{
|
||||
struct val_t __percpu_kptr *p, *p1;
|
||||
struct val_t *v;
|
||||
struct elem *e;
|
||||
int index = 0;
|
||||
|
||||
e = bpf_map_lookup_elem(&array, &index);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
p1 = bpf_kptr_xchg(&e->pc, p);
|
||||
if (p1)
|
||||
bpf_percpu_obj_drop(p1);
|
||||
|
||||
v = bpf_this_cpu_ptr(p);
|
||||
ret = v->b;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()")
|
||||
int BPF_PROG(test_array_map_4)
|
||||
{
|
||||
struct val_t __percpu_kptr *p;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_obj_drop(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
__failure __msg("arg#0 expected for bpf_obj_drop_impl()")
|
||||
int BPF_PROG(test_array_map_5)
|
||||
{
|
||||
struct val_t *p;
|
||||
|
||||
p = bpf_obj_new(struct val_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_percpu_obj_drop(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars")
|
||||
int BPF_PROG(test_array_map_6)
|
||||
{
|
||||
struct val_with_ptr_t __percpu_kptr *p;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_with_ptr_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_percpu_obj_drop(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("?fentry.s/bpf_fentry_test1")
|
||||
__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields")
|
||||
int BPF_PROG(test_array_map_7)
|
||||
{
|
||||
struct val_with_rb_root_t __percpu_kptr *p;
|
||||
|
||||
p = bpf_percpu_obj_new(struct val_with_rb_root_t);
|
||||
if (!p)
|
||||
return 0;
|
||||
|
||||
bpf_percpu_obj_drop(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
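The two mismatch tests above enforce that each allocator pairs with its own drop kfunc. For contrast, a correct sketch reusing struct val_t from this file:

	struct val_t __percpu_kptr *pp = bpf_percpu_obj_new(struct val_t);

	if (pp)
		bpf_percpu_obj_drop(pp);	/* percpu new pairs with percpu drop */

	struct val_t *vp = bpf_obj_new(typeof(*vp));

	if (vp)
		bpf_obj_drop(vp);		/* plain new pairs with plain drop */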
tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c (new file, 106 lines)
@ -0,0 +1,106 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
#include "bpf_experimental.h"
|
||||
|
||||
struct bin_data {
|
||||
char data[256];
|
||||
struct bpf_spin_lock lock;
|
||||
};
|
||||
|
||||
struct map_value {
|
||||
struct bin_data __kptr * data;
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_ARRAY);
|
||||
__type(key, int);
|
||||
__type(value, struct map_value);
|
||||
__uint(max_entries, 2048);
|
||||
} array SEC(".maps");
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
bool nomem_err = false;
|
||||
|
||||
static int del_array(unsigned int i, int *from)
|
||||
{
|
||||
struct map_value *value;
|
||||
struct bin_data *old;
|
||||
|
||||
value = bpf_map_lookup_elem(&array, from);
|
||||
if (!value)
|
||||
return 1;
|
||||
|
||||
old = bpf_kptr_xchg(&value->data, NULL);
|
||||
if (old)
|
||||
bpf_obj_drop(old);
|
||||
|
||||
(*from)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int add_array(unsigned int i, int *from)
|
||||
{
|
||||
struct bin_data *old, *new;
|
||||
struct map_value *value;
|
||||
|
||||
value = bpf_map_lookup_elem(&array, from);
|
||||
if (!value)
|
||||
return 1;
|
||||
|
||||
new = bpf_obj_new(typeof(*new));
|
||||
if (!new) {
|
||||
nomem_err = true;
|
||||
return 1;
|
||||
}
|
||||
|
||||
old = bpf_kptr_xchg(&value->data, new);
|
||||
if (old)
|
||||
bpf_obj_drop(old);
|
||||
|
||||
(*from)++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void del_then_add_array(int from)
|
||||
{
|
||||
int i;
|
||||
|
||||
i = from;
|
||||
bpf_loop(512, del_array, &i, 0);
|
||||
|
||||
i = from;
|
||||
bpf_loop(512, add_array, &i, 0);
|
||||
}
|
||||
|
||||
SEC("fentry/bpf_fentry_test1")
|
||||
int BPF_PROG2(test0, int, a)
|
||||
{
|
||||
del_then_add_array(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("fentry/bpf_fentry_test2")
|
||||
int BPF_PROG2(test1, int, a, u64, b)
|
||||
{
|
||||
del_then_add_array(512);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("fentry/bpf_fentry_test3")
|
||||
int BPF_PROG2(test2, char, a, int, b, u64, c)
|
||||
{
|
||||
del_then_add_array(1024);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("fentry/bpf_fentry_test4")
|
||||
int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d)
|
||||
{
|
||||
del_then_add_array(1536);
|
||||
return 0;
|
||||
}
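del_array() and add_array() above follow the bpf_loop() callback contract. A brief sketch of that contract (stop_after_ten is hypothetical):

	/* The callback returns 0 to continue and 1 to stop early; bpf_loop()
	 * invokes it up to nr times with an incrementing index and the ctx
	 * pointer passed through unchanged.
	 */
	static int stop_after_ten(u32 index, void *ctx)
	{
		return index >= 10;
	}

	/* bpf_loop(512, stop_after_ten, NULL, 0); */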
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c (new file, 18 lines)
@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Leon Hwang */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int count = 0;

SEC("fentry/subprog_tail")
int BPF_PROG(fentry, struct sk_buff *skb)
{
	count++;

	return 0;
}

char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c (new file, 18 lines)
@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Leon Hwang */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int count = 0;

SEC("fexit/subprog_tail")
int BPF_PROG(fexit, struct sk_buff *skb)
{
	count++;

	return 0;
}

char _license[] SEC("license") = "GPL";
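These fentry/fexit programs target a subprog of another BPF program, so the loader must supply that program's fd before load. A hypothetical userspace sketch (skel and tailcall_prog_fd are assumed names):

	bpf_program__set_attach_target(skel->progs.fentry, tailcall_prog_fd,
				       "subprog_tail");
	/* load the object, then bpf_program__attach(skel->progs.fentry) */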
@ -5,7 +5,8 @@
 #include "bpf_misc.h"

 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+	__clang_major__ >= 18

 SEC("socket")
 __description("BSWAP, 16")

@ -5,7 +5,8 @@
 #include "bpf_misc.h"

 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+	__clang_major__ >= 18

 SEC("socket")
 __description("gotol, small_imm")

@ -5,7 +5,8 @@
 #include "bpf_misc.h"

 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+	__clang_major__ >= 18

 SEC("socket")
 __description("LDSX, S8")

@ -5,7 +5,8 @@
 #include "bpf_misc.h"

 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+	__clang_major__ >= 18

 SEC("socket")
 __description("MOV32SX, S8")

@ -5,7 +5,8 @@
 #include "bpf_misc.h"

 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+	__clang_major__ >= 18

 SEC("socket")
 __description("SDIV32, non-zero imm divisor, check 1")

@ -509,6 +509,15 @@ def main():
|
||||
source_map_types.remove('cgroup_storage_deprecated')
|
||||
source_map_types.add('cgroup_storage')
|
||||
|
||||
# The same applies to BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED and
# BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, which share the same enum value;
# source_map_types picks
# BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED/percpu_cgroup_storage_deprecated.
# Replace 'percpu_cgroup_storage_deprecated' with 'percpu_cgroup_storage'
# so it aligns with what `bpftool map help` shows.
|
||||
source_map_types.remove('percpu_cgroup_storage_deprecated')
|
||||
source_map_types.add('percpu_cgroup_storage')
|
||||
|
||||
help_map_types = map_info.get_map_help()
|
||||
help_map_options = map_info.get_options()
|
||||
map_info.close()
|
||||
|
@ -73,17 +73,33 @@
|
||||
#
|
||||
# Run test suite for physical device in loopback mode
|
||||
# sudo ./test_xsk.sh -i IFACE
|
||||
#
|
||||
# Run test suite in a specific mode only [skb,drv,zc]
|
||||
# sudo ./test_xsk.sh -m MODE
|
||||
#
|
||||
# List available tests
|
||||
# ./test_xsk.sh -l
|
||||
#
|
||||
# Run a specific test from the test suite
|
||||
# sudo ./test_xsk.sh -t TEST_NAME
|
||||
#
|
||||
# Display the available command line options
|
||||
# ./test_xsk.sh -h
|
||||
|
||||
. xsk_prereqs.sh
|
||||
|
||||
ETH=""
|
||||
|
||||
-while getopts "vi:d" flag
+while getopts "vi:dm:lt:h" flag
|
||||
do
|
||||
case "${flag}" in
|
||||
v) verbose=1;;
|
||||
d) debug=1;;
|
||||
i) ETH=${OPTARG};;
|
||||
m) MODE=${OPTARG};;
|
||||
l) list=1;;
|
||||
t) TEST=${OPTARG};;
|
||||
h) help=1;;
|
||||
esac
|
||||
done
|
||||
|
||||
@ -131,6 +147,16 @@ setup_vethPairs() {
|
||||
ip link set ${VETH0} up
|
||||
}
|
||||
|
||||
if [[ $list -eq 1 ]]; then
|
||||
./${XSKOBJ} -l
|
||||
exit
|
||||
fi
|
||||
|
||||
if [[ $help -eq 1 ]]; then
|
||||
./${XSKOBJ}
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ ! -z $ETH ]; then
|
||||
VETH0=${ETH}
|
||||
VETH1=${ETH}
|
||||
@ -153,6 +179,14 @@ if [[ $verbose -eq 1 ]]; then
|
||||
ARGS+="-v "
|
||||
fi
|
||||
|
||||
if [ -n "$MODE" ]; then
|
||||
ARGS+="-m ${MODE} "
|
||||
fi
|
||||
|
||||
if [ -n "$TEST" ]; then
|
||||
ARGS+="-t ${TEST} "
|
||||
fi
|
||||
|
||||
retval=$?
|
||||
test_status $retval "${TEST_NAME}"
|
||||
|
||||
@ -175,6 +209,10 @@ else
|
||||
cleanup_iface ${ETH} ${MTU}
|
||||
fi
|
||||
|
||||
if [[ $list -eq 1 ]]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
|
||||
busy_poll=1
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <sys/mman.h>
|
||||
@ -14,102 +15,163 @@
|
||||
#include <linux/limits.h>
|
||||
#include <libelf.h>
|
||||
#include <gelf.h>
|
||||
#include "bpf/libbpf_internal.h"
|
||||
|
||||
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
|
||||
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"
|
||||
|
||||
#define MAX_SYMS 400000
|
||||
static struct ksym syms[MAX_SYMS];
|
||||
static int sym_cnt;
|
||||
struct ksyms {
|
||||
struct ksym *syms;
|
||||
size_t sym_cap;
|
||||
size_t sym_cnt;
|
||||
};
|
||||
|
||||
static struct ksyms *ksyms;
|
||||
static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static int ksyms__add_symbol(struct ksyms *ksyms, const char *name,
|
||||
unsigned long addr)
|
||||
{
|
||||
void *tmp;
|
||||
|
||||
tmp = strdup(name);
|
||||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
ksyms->syms[ksyms->sym_cnt].addr = addr;
|
||||
ksyms->syms[ksyms->sym_cnt].name = tmp;
|
||||
ksyms->sym_cnt++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void free_kallsyms_local(struct ksyms *ksyms)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (!ksyms)
|
||||
return;
|
||||
|
||||
if (!ksyms->syms) {
|
||||
free(ksyms);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < ksyms->sym_cnt; i++)
|
||||
free(ksyms->syms[i].name);
|
||||
free(ksyms->syms);
|
||||
free(ksyms);
|
||||
}
|
||||
|
||||
static int ksym_cmp(const void *p1, const void *p2)
|
||||
{
|
||||
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
|
||||
}
|
||||
|
||||
int load_kallsyms_refresh(void)
|
||||
struct ksyms *load_kallsyms_local(void)
|
||||
{
|
||||
FILE *f;
|
||||
char func[256], buf[256];
|
||||
char symbol;
|
||||
void *addr;
|
||||
int i = 0;
|
||||
|
||||
sym_cnt = 0;
|
||||
int ret;
|
||||
struct ksyms *ksyms;
|
||||
|
||||
f = fopen("/proc/kallsyms", "r");
|
||||
if (!f)
|
||||
return -ENOENT;
|
||||
return NULL;
|
||||
|
||||
ksyms = calloc(1, sizeof(struct ksyms));
|
||||
if (!ksyms) {
|
||||
fclose(f);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
while (fgets(buf, sizeof(buf), f)) {
|
||||
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
|
||||
break;
|
||||
if (!addr)
|
||||
continue;
|
||||
if (i >= MAX_SYMS)
|
||||
return -EFBIG;
|
||||
|
||||
syms[i].addr = (long) addr;
|
||||
syms[i].name = strdup(func);
|
||||
i++;
|
||||
ret = libbpf_ensure_mem((void **) &ksyms->syms, &ksyms->sym_cap,
|
||||
sizeof(struct ksym), ksyms->sym_cnt + 1);
|
||||
if (ret)
|
||||
goto error;
|
||||
ret = ksyms__add_symbol(ksyms, func, (unsigned long)addr);
|
||||
if (ret)
|
||||
goto error;
|
||||
}
|
||||
fclose(f);
|
||||
sym_cnt = i;
|
||||
qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
|
||||
return 0;
|
||||
qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
|
||||
return ksyms;
|
||||
|
||||
error:
|
||||
fclose(f);
|
||||
free_kallsyms_local(ksyms);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int load_kallsyms(void)
|
||||
{
|
||||
	/*
	 * This is called/used from multiple places,
	 * so load the symbols just once.
	 */
|
||||
if (sym_cnt)
|
||||
return 0;
|
||||
return load_kallsyms_refresh();
|
||||
pthread_mutex_lock(&ksyms_mutex);
|
||||
if (!ksyms)
|
||||
ksyms = load_kallsyms_local();
|
||||
pthread_mutex_unlock(&ksyms_mutex);
|
||||
return ksyms ? 0 : 1;
|
||||
}
|
||||
|
||||
struct ksym *ksym_search(long key)
|
||||
struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
|
||||
{
|
||||
int start = 0, end = sym_cnt;
|
||||
int start = 0, end = ksyms->sym_cnt;
|
||||
int result;
|
||||
|
||||
/* kallsyms not loaded. return NULL */
|
||||
if (sym_cnt <= 0)
|
||||
if (ksyms->sym_cnt <= 0)
|
||||
return NULL;
|
||||
|
||||
while (start < end) {
|
||||
size_t mid = start + (end - start) / 2;
|
||||
|
||||
result = key - syms[mid].addr;
|
||||
result = key - ksyms->syms[mid].addr;
|
||||
if (result < 0)
|
||||
end = mid;
|
||||
else if (result > 0)
|
||||
start = mid + 1;
|
||||
else
|
||||
return &syms[mid];
|
||||
return &ksyms->syms[mid];
|
||||
}
|
||||
|
||||
if (start >= 1 && syms[start - 1].addr < key &&
|
||||
key < syms[start].addr)
|
||||
if (start >= 1 && ksyms->syms[start - 1].addr < key &&
|
||||
key < ksyms->syms[start].addr)
|
||||
/* valid ksym */
|
||||
return &syms[start - 1];
|
||||
return &ksyms->syms[start - 1];
|
||||
|
||||
/* out of range. return _stext */
|
||||
return &syms[0];
|
||||
return &ksyms->syms[0];
|
||||
}
|
||||
|
||||
struct ksym *ksym_search(long key)
|
||||
{
|
||||
if (!ksyms)
|
||||
return NULL;
|
||||
return ksym_search_local(ksyms, key);
|
||||
}
|
||||
|
||||
long ksym_get_addr_local(struct ksyms *ksyms, const char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ksyms->sym_cnt; i++) {
|
||||
if (strcmp(ksyms->syms[i].name, name) == 0)
|
||||
return ksyms->syms[i].addr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
long ksym_get_addr(const char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sym_cnt; i++) {
|
||||
if (strcmp(syms[i].name, name) == 0)
|
||||
return syms[i].addr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!ksyms)
|
||||
return 0;
|
||||
return ksym_get_addr_local(ksyms, name);
|
||||
}
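A usage sketch for the reentrant API introduced above, where each caller owns a private ksyms instance (some_addr is a placeholder):

	struct ksyms *ks = load_kallsyms_local();

	if (ks) {
		struct ksym *sym = ksym_search_local(ks, some_addr);
		long addr = ksym_get_addr_local(ks, "bpf_fentry_test1");

		free_kallsyms_local(ks);
	}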
|
||||
|
||||
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
|
||||
|
@ -11,13 +11,17 @@ struct ksym {
|
||||
long addr;
|
||||
char *name;
|
||||
};
|
||||
struct ksyms;
|
||||
|
||||
int load_kallsyms(void);
|
||||
int load_kallsyms_refresh(void);
|
||||
|
||||
struct ksym *ksym_search(long key);
|
||||
long ksym_get_addr(const char *name);
|
||||
|
||||
struct ksyms *load_kallsyms_local(void);
|
||||
struct ksym *ksym_search_local(struct ksyms *ksyms, long key);
|
||||
long ksym_get_addr_local(struct ksyms *ksyms, const char *name);
|
||||
void free_kallsyms_local(struct ksyms *ksyms);
|
||||
|
||||
/* open kallsyms and find addresses on the fly, faster than load + search. */
|
||||
int kallsyms_find(const char *sym, unsigned long long *addr);
|
||||
|
||||
|
@ -83,9 +83,11 @@ exec_xskxceiver()
|
||||
fi
|
||||
|
||||
./${XSKOBJ} -i ${VETH0} -i ${VETH1} ${ARGS}
|
||||
|
||||
retval=$?
|
||||
test_status $retval "${TEST_NAME}"
|
||||
statusList+=($retval)
|
||||
nameList+=(${TEST_NAME})
|
||||
|
||||
if [[ $list -ne 1 ]]; then
|
||||
test_status $retval "${TEST_NAME}"
|
||||
statusList+=($retval)
|
||||
nameList+=(${TEST_NAME})
|
||||
fi
|
||||
}
|
||||
|
@ -107,6 +107,11 @@
|
||||
static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62";
|
||||
static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61";
|
||||
|
||||
static bool opt_verbose;
|
||||
static bool opt_print_tests;
|
||||
static enum test_mode opt_mode = TEST_MODE_ALL;
|
||||
static u32 opt_run_test = RUN_ALL_TESTS;
|
||||
|
||||
static void __exit_with_error(int error, const char *file, const char *func, int line)
|
||||
{
|
||||
ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error,
|
||||
@ -310,19 +315,28 @@ static struct option long_options[] = {
|
||||
{"interface", required_argument, 0, 'i'},
|
||||
{"busy-poll", no_argument, 0, 'b'},
|
||||
{"verbose", no_argument, 0, 'v'},
|
||||
{"mode", required_argument, 0, 'm'},
|
||||
{"list", no_argument, 0, 'l'},
|
||||
{"test", required_argument, 0, 't'},
|
||||
{"help", no_argument, 0, 'h'},
|
||||
{0, 0, 0, 0}
|
||||
};
|
||||
|
||||
static void usage(const char *prog)
|
||||
static void print_usage(char **argv)
|
||||
{
|
||||
const char *str =
|
||||
" Usage: %s [OPTIONS]\n"
|
||||
" Usage: xskxceiver [OPTIONS]\n"
|
||||
" Options:\n"
|
||||
" -i, --interface Use interface\n"
|
||||
" -v, --verbose Verbose output\n"
|
||||
" -b, --busy-poll Enable busy poll\n";
|
||||
" -b, --busy-poll Enable busy poll\n"
|
||||
" -m, --mode Run only mode skb, drv, or zc\n"
|
||||
" -l, --list List all available tests\n"
|
||||
" -t, --test Run a specific test. Enter number from -l option.\n"
|
||||
" -h, --help Display this help and exit\n";
|
||||
|
||||
ksft_print_msg(str, prog);
|
||||
ksft_print_msg(str, basename(argv[0]));
|
||||
ksft_exit_xfail();
|
||||
}
|
||||
|
||||
static bool validate_interface(struct ifobject *ifobj)
|
||||
@ -342,7 +356,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
|
||||
opterr = 0;
|
||||
|
||||
for (;;) {
|
||||
c = getopt_long(argc, argv, "i:vb", long_options, &option_index);
|
||||
c = getopt_long(argc, argv, "i:vbm:lt:", long_options, &option_index);
|
||||
if (c == -1)
|
||||
break;
|
||||
|
||||
@ -371,9 +385,28 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
|
||||
ifobj_tx->busy_poll = true;
|
||||
ifobj_rx->busy_poll = true;
|
||||
break;
|
||||
case 'm':
|
||||
if (!strncmp("skb", optarg, strlen(optarg)))
|
||||
opt_mode = TEST_MODE_SKB;
|
||||
else if (!strncmp("drv", optarg, strlen(optarg)))
|
||||
opt_mode = TEST_MODE_DRV;
|
||||
else if (!strncmp("zc", optarg, strlen(optarg)))
|
||||
opt_mode = TEST_MODE_ZC;
|
||||
else
|
||||
print_usage(argv);
|
||||
break;
|
||||
case 'l':
|
||||
opt_print_tests = true;
|
||||
break;
|
||||
case 't':
|
||||
errno = 0;
|
||||
opt_run_test = strtol(optarg, NULL, 0);
|
||||
if (errno)
|
||||
print_usage(argv);
|
||||
break;
|
||||
case 'h':
|
||||
default:
|
||||
usage(basename(argv[0]));
|
||||
ksft_exit_xfail();
|
||||
print_usage(argv);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -427,7 +460,8 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
|
||||
}
|
||||
|
||||
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
|
||||
struct ifobject *ifobj_rx, enum test_mode mode)
|
||||
struct ifobject *ifobj_rx, enum test_mode mode,
|
||||
const struct test_spec *test_to_run)
|
||||
{
|
||||
struct pkt_stream *tx_pkt_stream;
|
||||
struct pkt_stream *rx_pkt_stream;
|
||||
@ -449,6 +483,8 @@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
|
||||
ifobj->bind_flags |= XDP_COPY;
|
||||
}
|
||||
|
||||
strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE);
|
||||
test->test_func = test_to_run->test_func;
|
||||
test->mode = mode;
|
||||
__test_spec_init(test, ifobj_tx, ifobj_rx);
|
||||
}
|
||||
@ -458,11 +494,6 @@ static void test_spec_reset(struct test_spec *test)
|
||||
__test_spec_init(test, test->ifobj_tx, test->ifobj_rx);
|
||||
}
|
||||
|
||||
static void test_spec_set_name(struct test_spec *test, const char *name)
|
||||
{
|
||||
strncpy(test->name, name, MAX_TEST_NAME_SIZE);
|
||||
}
|
||||
|
||||
static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx,
|
||||
struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx,
|
||||
struct bpf_map *xskmap_tx)
|
||||
@ -747,6 +778,9 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
|
||||
len = 0;
|
||||
}
|
||||
|
||||
print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n",
|
||||
pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb);
|
||||
|
||||
if (pkt->valid && pkt->len > pkt_stream->max_pkt_len)
|
||||
pkt_stream->max_pkt_len = pkt->len;
|
||||
pkt_nb++;
|
||||
@ -777,7 +811,7 @@ static void pkt_print_data(u32 *data, u32 cnt)
|
||||
|
||||
seqnum = ntohl(*data) & 0xffff;
|
||||
pkt_nb = ntohl(*data) >> 16;
|
||||
fprintf(stdout, "%u:%u ", pkt_nb, seqnum);
|
||||
ksft_print_msg("%u:%u ", pkt_nb, seqnum);
|
||||
data++;
|
||||
}
|
||||
}
|
||||
@ -789,13 +823,13 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
|
||||
|
||||
if (eth_header) {
|
||||
/*extract L2 frame */
|
||||
fprintf(stdout, "DEBUG>> L2: dst mac: ");
|
||||
ksft_print_msg("DEBUG>> L2: dst mac: ");
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
fprintf(stdout, "%02X", ethhdr->h_dest[i]);
|
||||
ksft_print_msg("%02X", ethhdr->h_dest[i]);
|
||||
|
||||
fprintf(stdout, "\nDEBUG>> L2: src mac: ");
|
||||
ksft_print_msg("\nDEBUG>> L2: src mac: ");
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
fprintf(stdout, "%02X", ethhdr->h_source[i]);
|
||||
ksft_print_msg("%02X", ethhdr->h_source[i]);
|
||||
|
||||
data = pkt + PKT_HDR_SIZE;
|
||||
} else {
|
||||
@ -803,15 +837,15 @@ static void pkt_dump(void *pkt, u32 len, bool eth_header)
|
||||
}
|
||||
|
||||
/*extract L5 frame */
|
||||
fprintf(stdout, "\nDEBUG>> L5: seqnum: ");
|
||||
ksft_print_msg("\nDEBUG>> L5: seqnum: ");
|
||||
pkt_print_data(data, PKT_DUMP_NB_TO_PRINT);
|
||||
fprintf(stdout, "....");
|
||||
ksft_print_msg("....");
|
||||
if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) {
|
||||
fprintf(stdout, "\n.... ");
|
||||
ksft_print_msg("\n.... ");
|
||||
pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT,
|
||||
PKT_DUMP_NB_TO_PRINT);
|
||||
}
|
||||
fprintf(stdout, "\n---------------------------------------\n");
|
||||
ksft_print_msg("\n---------------------------------------\n");
|
||||
}
|
||||
|
||||
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr)
|
||||
@ -916,36 +950,42 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void kick_tx(struct xsk_socket_info *xsk)
|
||||
static int kick_tx(struct xsk_socket_info *xsk)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
|
||||
if (ret >= 0)
|
||||
return;
|
||||
return TEST_PASS;
|
||||
if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
|
||||
usleep(100);
|
||||
return;
|
||||
return TEST_PASS;
|
||||
}
|
||||
exit_with_error(errno);
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
|
||||
static void kick_rx(struct xsk_socket_info *xsk)
|
||||
static int kick_rx(struct xsk_socket_info *xsk)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
|
||||
if (ret < 0)
|
||||
exit_with_error(errno);
|
||||
return TEST_FAILURE;
|
||||
|
||||
return TEST_PASS;
|
||||
}
|
||||
|
||||
static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
|
||||
{
|
||||
unsigned int rcvd;
|
||||
u32 idx;
|
||||
int ret;
|
||||
|
||||
if (xsk_ring_prod__needs_wakeup(&xsk->tx))
|
||||
kick_tx(xsk);
|
||||
if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
|
||||
ret = kick_tx(xsk);
|
||||
if (ret)
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
|
||||
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
|
||||
if (rcvd) {
|
||||
@ -993,11 +1033,14 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
|
||||
kick_rx(xsk);
|
||||
ret = kick_rx(xsk);
|
||||
if (ret)
|
||||
return TEST_FAILURE;
|
||||
|
||||
if (ifobj->use_poll) {
|
||||
ret = poll(fds, 1, POLL_TMOUT);
|
||||
if (ret < 0)
|
||||
exit_with_error(errno);
|
||||
return TEST_FAILURE;
|
||||
|
||||
if (!ret) {
|
||||
if (!is_umem_valid(test->ifobj_tx))
|
||||
@ -1018,12 +1061,10 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
|
||||
if (ifobj->use_fill_ring) {
|
||||
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
|
||||
while (ret != rcvd) {
|
||||
if (ret < 0)
|
||||
exit_with_error(-ret);
|
||||
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
|
||||
ret = poll(fds, 1, POLL_TMOUT);
|
||||
if (ret < 0)
|
||||
exit_with_error(errno);
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
|
||||
}
|
||||
@ -1042,6 +1083,9 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
|
||||
print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n",
|
||||
addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid);
|
||||
|
||||
if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) ||
|
||||
!is_offset_correct(umem, pkt, addr) ||
|
||||
(ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr)))
|
||||
@ -1104,7 +1148,9 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
|
||||
buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len);
|
||||
/* pkts_in_flight might be negative if many invalid packets are sent */
|
||||
if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) {
|
||||
kick_tx(xsk);
|
||||
ret = kick_tx(xsk);
|
||||
if (ret)
|
||||
return TEST_FAILURE;
|
||||
return TEST_CONTINUE;
|
||||
}
|
||||
|
||||
@ -1165,6 +1211,9 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
|
||||
bytes_written);
|
||||
bytes_written += tx_desc->len;
|
||||
|
||||
print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n",
|
||||
tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb);
|
||||
|
||||
if (nb_frags_left) {
|
||||
i++;
|
||||
if (pkt_stream->verbatim)
|
||||
@ -1207,10 +1256,29 @@ static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeo
|
||||
return TEST_CONTINUE;
|
||||
}
|
||||
|
||||
static void wait_for_tx_completion(struct xsk_socket_info *xsk)
|
||||
static int wait_for_tx_completion(struct xsk_socket_info *xsk)
|
||||
{
|
||||
while (xsk->outstanding_tx)
|
||||
struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0};
|
||||
int ret;
|
||||
|
||||
ret = gettimeofday(&tv_now, NULL);
|
||||
if (ret)
|
||||
exit_with_error(errno);
|
||||
timeradd(&tv_now, &tv_timeout, &tv_end);
|
||||
|
||||
while (xsk->outstanding_tx) {
|
||||
ret = gettimeofday(&tv_now, NULL);
|
||||
if (ret)
|
||||
exit_with_error(errno);
|
||||
if (timercmp(&tv_now, &tv_end, >)) {
|
||||
ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__);
|
||||
return TEST_FAILURE;
|
||||
}
|
||||
|
||||
complete_pkts(xsk, BATCH_SIZE);
|
||||
}
|
||||
|
||||
return TEST_PASS;
|
||||
}
|
||||
|
||||
static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
|
||||
@ -1233,8 +1301,7 @@ static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
|
||||
return ret;
|
||||
}
|
||||
|
||||
wait_for_tx_completion(ifobject->xsk);
|
||||
return TEST_PASS;
|
||||
return wait_for_tx_completion(ifobject->xsk);
|
||||
}
|
||||
|
||||
static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
|
||||
@ -1266,7 +1333,9 @@ static int validate_rx_dropped(struct ifobject *ifobject)
|
||||
struct xdp_statistics stats;
|
||||
int err;
|
||||
|
||||
kick_rx(ifobject->xsk);
|
||||
err = kick_rx(ifobject->xsk);
|
||||
if (err)
|
||||
return TEST_FAILURE;
|
||||
|
||||
err = get_xsk_stats(xsk, &stats);
|
||||
if (err)
|
||||
@ -1292,7 +1361,9 @@ static int validate_rx_full(struct ifobject *ifobject)
|
||||
int err;
|
||||
|
||||
usleep(1000);
|
||||
kick_rx(ifobject->xsk);
|
||||
err = kick_rx(ifobject->xsk);
|
||||
if (err)
|
||||
return TEST_FAILURE;
|
||||
|
||||
err = get_xsk_stats(xsk, &stats);
|
||||
if (err)
|
||||
@ -1311,7 +1382,9 @@ static int validate_fill_empty(struct ifobject *ifobject)
|
||||
int err;
|
||||
|
||||
usleep(1000);
|
||||
kick_rx(ifobject->xsk);
|
||||
err = kick_rx(ifobject->xsk);
|
||||
if (err)
|
||||
return TEST_FAILURE;
|
||||
|
||||
err = get_xsk_stats(xsk, &stats);
|
||||
if (err)
|
||||
@ -1475,8 +1548,6 @@ static void *worker_testapp_validate_tx(void *arg)
|
||||
thread_common_ops_tx(test, ifobject);
|
||||
}
|
||||
|
||||
print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
|
||||
ifobject->ifname);
|
||||
err = send_pkts(test, ifobject);
|
||||
|
||||
if (!err && ifobject->validation_func)
|
||||
@ -1500,7 +1571,8 @@ static void *worker_testapp_validate_rx(void *arg)
|
||||
xsk_clear_xskmap(ifobject->xskmap);
|
||||
err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk);
|
||||
if (err) {
|
||||
printf("Error: Failed to update xskmap, error %s\n", strerror(-err));
|
||||
ksft_print_msg("Error: Failed to update xskmap, error %s\n",
|
||||
strerror(-err));
|
||||
exit_with_error(-err);
|
||||
}
|
||||
}
|
||||
@ -1564,7 +1636,7 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro
|
||||
xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode));
|
||||
err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode));
|
||||
if (err) {
|
||||
printf("Error attaching XDP program\n");
|
||||
ksft_print_msg("Error attaching XDP program\n");
|
||||
exit_with_error(-err);
|
||||
}
|
||||
|
||||
@ -1682,7 +1754,6 @@ static int testapp_teardown(struct test_spec *test)
|
||||
{
|
||||
int i;
|
||||
|
||||
test_spec_set_name(test, "TEARDOWN");
|
||||
for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
|
||||
if (testapp_validate_traffic(test))
|
||||
return TEST_FAILURE;
|
||||
@ -1704,18 +1775,17 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2)
|
||||
*ifobj2 = tmp_ifobj;
|
||||
}
|
||||
|
||||
static int testapp_bidi(struct test_spec *test)
|
||||
static int testapp_bidirectional(struct test_spec *test)
|
||||
{
|
||||
int res;
|
||||
|
||||
test_spec_set_name(test, "BIDIRECTIONAL");
|
||||
test->ifobj_tx->rx_on = true;
|
||||
test->ifobj_rx->tx_on = true;
|
||||
test->total_steps = 2;
|
||||
if (testapp_validate_traffic(test))
|
||||
return TEST_FAILURE;
|
||||
|
||||
print_verbose("Switching Tx/Rx vectors\n");
|
||||
print_verbose("Switching Tx/Rx direction\n");
|
||||
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
|
||||
res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);
|
||||
|
||||
@ -1723,7 +1793,7 @@ static int testapp_bidi(struct test_spec *test)
|
||||
return res;
|
||||
}
|
||||
|
||||
static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
|
||||
static int swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -1734,31 +1804,31 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
|
||||
|
||||
ret = xsk_update_xskmap(ifobj_rx->xskmap, ifobj_rx->xsk->xsk);
|
||||
if (ret)
|
||||
exit_with_error(errno);
|
||||
return TEST_FAILURE;
|
||||
|
||||
return TEST_PASS;
|
||||
}
|
||||
|
||||
static int testapp_bpf_res(struct test_spec *test)
|
||||
static int testapp_xdp_prog_cleanup(struct test_spec *test)
|
||||
{
|
||||
test_spec_set_name(test, "BPF_RES");
|
||||
test->total_steps = 2;
|
||||
test->nb_sockets = 2;
|
||||
if (testapp_validate_traffic(test))
|
||||
return TEST_FAILURE;
|
||||
|
||||
swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
|
||||
if (swap_xsk_resources(test->ifobj_tx, test->ifobj_rx))
|
||||
return TEST_FAILURE;
|
||||
return testapp_validate_traffic(test);
|
||||
}
|
||||
|
||||
static int testapp_headroom(struct test_spec *test)
|
||||
{
|
||||
test_spec_set_name(test, "UMEM_HEADROOM");
|
||||
test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
|
||||
return testapp_validate_traffic(test);
|
||||
}
|
||||
|
||||
static int testapp_stats_rx_dropped(struct test_spec *test)
|
||||
{
|
||||
test_spec_set_name(test, "STAT_RX_DROPPED");
|
||||
if (test->mode == TEST_MODE_ZC) {
|
||||
ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
|
||||
return TEST_SKIP;
|
||||
@@ -1774,7 +1844,6 @@ static int testapp_stats_rx_dropped(struct test_spec *test)
 
 static int testapp_stats_tx_invalid_descs(struct test_spec *test)
 {
-	test_spec_set_name(test, "STAT_TX_INVALID");
 	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
 	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
 	return testapp_validate_traffic(test);
@@ -1782,7 +1851,6 @@ static int testapp_stats_tx_invalid_descs(struct test_spec *test)
 
 static int testapp_stats_rx_full(struct test_spec *test)
 {
-	test_spec_set_name(test, "STAT_RX_FULL");
 	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
 	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
 							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -1795,7 +1863,6 @@ static int testapp_stats_rx_full(struct test_spec *test)
 
 static int testapp_stats_fill_empty(struct test_spec *test)
 {
-	test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
 	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
 	test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
 							 DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -1805,9 +1872,8 @@ static int testapp_stats_fill_empty(struct test_spec *test)
 	return testapp_validate_traffic(test);
 }
 
-static int testapp_unaligned(struct test_spec *test)
+static int testapp_send_receive_unaligned(struct test_spec *test)
 {
-	test_spec_set_name(test, "UNALIGNED_MODE");
 	test->ifobj_tx->umem->unaligned_mode = true;
 	test->ifobj_rx->umem->unaligned_mode = true;
 	/* Let half of the packets straddle a 4K buffer boundary */
@@ -1816,9 +1882,8 @@ static int testapp_unaligned(struct test_spec *test)
 	return testapp_validate_traffic(test);
 }
 
-static int testapp_unaligned_mb(struct test_spec *test)
+static int testapp_send_receive_unaligned_mb(struct test_spec *test)
 {
-	test_spec_set_name(test, "UNALIGNED_MODE_9K");
 	test->mtu = MAX_ETH_JUMBO_SIZE;
 	test->ifobj_tx->umem->unaligned_mode = true;
 	test->ifobj_rx->umem->unaligned_mode = true;
@@ -1834,9 +1899,8 @@ static int testapp_single_pkt(struct test_spec *test)
 	return testapp_validate_traffic(test);
 }
 
-static int testapp_multi_buffer(struct test_spec *test)
+static int testapp_send_receive_mb(struct test_spec *test)
 {
-	test_spec_set_name(test, "RUN_TO_COMPLETION_9K_PACKETS");
 	test->mtu = MAX_ETH_JUMBO_SIZE;
 	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
 
@@ -1933,7 +1997,6 @@ static int testapp_xdp_drop(struct test_spec *test)
 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
 
-	test_spec_set_name(test, "XDP_DROP_HALF");
 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
 
@@ -1941,7 +2004,7 @@ static int testapp_xdp_drop(struct test_spec *test)
 	return testapp_validate_traffic(test);
 }
 
-static int testapp_xdp_metadata_count(struct test_spec *test)
+static int testapp_xdp_metadata_copy(struct test_spec *test)
 {
 	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
 	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
@@ -1955,19 +2018,21 @@ static int testapp_xdp_metadata_count(struct test_spec *test)
 	test->ifobj_rx->use_metadata = true;
 
 	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
-	if (!data_map || !bpf_map__is_internal(data_map))
-		exit_with_error(ENOMEM);
+	if (!data_map || !bpf_map__is_internal(data_map)) {
+		ksft_print_msg("Error: could not find bss section of XDP program\n");
+		return TEST_FAILURE;
+	}
 
-	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY))
-		exit_with_error(errno);
+	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
+		ksft_print_msg("Error: could not update count element\n");
+		return TEST_FAILURE;
+	}
 
 	return testapp_validate_traffic(test);
 }
 
 static int testapp_poll_txq_tmout(struct test_spec *test)
 {
-	test_spec_set_name(test, "POLL_TXQ_FULL");
-
 	test->ifobj_tx->use_poll = true;
 	/* create invalid frame by set umem frame_size and pkt length equal to 2048 */
 	test->ifobj_tx->umem->frame_size = 2048;
@@ -1977,7 +2042,6 @@ static int testapp_poll_txq_tmout(struct test_spec *test)
 
 static int testapp_poll_rxq_tmout(struct test_spec *test)
 {
-	test_spec_set_name(test, "POLL_RXQ_EMPTY");
 	test->ifobj_rx->use_poll = true;
 	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
 }
@@ -1987,7 +2051,6 @@ static int testapp_too_many_frags(struct test_spec *test)
 	struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
 	u32 max_frags, i;
 
-	test_spec_set_name(test, "TOO_MANY_FRAGS");
 	if (test->mode == TEST_MODE_ZC)
 		max_frags = test->ifobj_tx->xdp_zc_max_segs;
 	else
@@ -2067,7 +2130,7 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
 
 	err = xsk_load_xdp_programs(ifobj);
 	if (err) {
-		printf("Error loading XDP program\n");
+		ksft_print_msg("Error loading XDP program\n");
 		exit_with_error(err);
 	}
 
@@ -2091,138 +2154,98 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
 	}
 }
 
-static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
-{
-	int ret = TEST_SKIP;
-
-	switch (type) {
-	case TEST_TYPE_STATS_RX_DROPPED:
-		ret = testapp_stats_rx_dropped(test);
-		break;
-	case TEST_TYPE_STATS_TX_INVALID_DESCS:
-		ret = testapp_stats_tx_invalid_descs(test);
-		break;
-	case TEST_TYPE_STATS_RX_FULL:
-		ret = testapp_stats_rx_full(test);
-		break;
-	case TEST_TYPE_STATS_FILL_EMPTY:
-		ret = testapp_stats_fill_empty(test);
-		break;
-	case TEST_TYPE_TEARDOWN:
-		ret = testapp_teardown(test);
-		break;
-	case TEST_TYPE_BIDI:
-		ret = testapp_bidi(test);
-		break;
-	case TEST_TYPE_BPF_RES:
-		ret = testapp_bpf_res(test);
-		break;
-	case TEST_TYPE_RUN_TO_COMPLETION:
-		test_spec_set_name(test, "RUN_TO_COMPLETION");
-		ret = testapp_validate_traffic(test);
-		break;
-	case TEST_TYPE_RUN_TO_COMPLETION_MB:
-		ret = testapp_multi_buffer(test);
-		break;
-	case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT:
-		test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT");
-		ret = testapp_single_pkt(test);
-		break;
-	case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME:
-		test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
-		test->ifobj_tx->umem->frame_size = 2048;
-		test->ifobj_rx->umem->frame_size = 2048;
-		pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
-		ret = testapp_validate_traffic(test);
-		break;
-	case TEST_TYPE_RX_POLL:
-		test->ifobj_rx->use_poll = true;
-		test_spec_set_name(test, "POLL_RX");
-		ret = testapp_validate_traffic(test);
-		break;
-	case TEST_TYPE_TX_POLL:
-		test->ifobj_tx->use_poll = true;
-		test_spec_set_name(test, "POLL_TX");
-		ret = testapp_validate_traffic(test);
-		break;
-	case TEST_TYPE_POLL_TXQ_TMOUT:
-		ret = testapp_poll_txq_tmout(test);
-		break;
-	case TEST_TYPE_POLL_RXQ_TMOUT:
-		ret = testapp_poll_rxq_tmout(test);
-		break;
-	case TEST_TYPE_ALIGNED_INV_DESC:
-		test_spec_set_name(test, "ALIGNED_INV_DESC");
-		ret = testapp_invalid_desc(test);
-		break;
-	case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME:
-		test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE");
-		test->ifobj_tx->umem->frame_size = 2048;
-		test->ifobj_rx->umem->frame_size = 2048;
-		ret = testapp_invalid_desc(test);
-		break;
-	case TEST_TYPE_UNALIGNED_INV_DESC:
-		test_spec_set_name(test, "UNALIGNED_INV_DESC");
-		test->ifobj_tx->umem->unaligned_mode = true;
-		test->ifobj_rx->umem->unaligned_mode = true;
-		ret = testapp_invalid_desc(test);
-		break;
-	case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: {
-		u64 page_size, umem_size;
-
-		test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE");
-		/* Odd frame size so the UMEM doesn't end near a page boundary. */
-		test->ifobj_tx->umem->frame_size = 4001;
-		test->ifobj_rx->umem->frame_size = 4001;
-		test->ifobj_tx->umem->unaligned_mode = true;
-		test->ifobj_rx->umem->unaligned_mode = true;
-		/* This test exists to test descriptors that staddle the end of
-		 * the UMEM but not a page.
-		 */
-		page_size = sysconf(_SC_PAGESIZE);
-		umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
-		assert(umem_size % page_size > MIN_PKT_SIZE);
-		assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
-		ret = testapp_invalid_desc(test);
-		break;
-	}
-	case TEST_TYPE_ALIGNED_INV_DESC_MB:
-		test_spec_set_name(test, "ALIGNED_INV_DESC_MULTI_BUFF");
-		ret = testapp_invalid_desc_mb(test);
-		break;
-	case TEST_TYPE_UNALIGNED_INV_DESC_MB:
-		test_spec_set_name(test, "UNALIGNED_INV_DESC_MULTI_BUFF");
-		test->ifobj_tx->umem->unaligned_mode = true;
-		test->ifobj_rx->umem->unaligned_mode = true;
-		ret = testapp_invalid_desc_mb(test);
-		break;
-	case TEST_TYPE_UNALIGNED:
-		ret = testapp_unaligned(test);
-		break;
-	case TEST_TYPE_UNALIGNED_MB:
-		ret = testapp_unaligned_mb(test);
-		break;
-	case TEST_TYPE_HEADROOM:
-		ret = testapp_headroom(test);
-		break;
-	case TEST_TYPE_XDP_DROP_HALF:
-		ret = testapp_xdp_drop(test);
-		break;
-	case TEST_TYPE_XDP_METADATA_COUNT:
-		test_spec_set_name(test, "XDP_METADATA_COUNT");
-		ret = testapp_xdp_metadata_count(test);
-		break;
-	case TEST_TYPE_XDP_METADATA_COUNT_MB:
-		test_spec_set_name(test, "XDP_METADATA_COUNT_MULTI_BUFF");
-		test->mtu = MAX_ETH_JUMBO_SIZE;
-		ret = testapp_xdp_metadata_count(test);
-		break;
-	case TEST_TYPE_TOO_MANY_FRAGS:
-		ret = testapp_too_many_frags(test);
-		break;
-	default:
-		break;
-	}
+static int testapp_send_receive(struct test_spec *test)
+{
+	return testapp_validate_traffic(test);
+}
+
+static int testapp_send_receive_2k_frame(struct test_spec *test)
+{
+	test->ifobj_tx->umem->frame_size = 2048;
+	test->ifobj_rx->umem->frame_size = 2048;
+	pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+	return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_rx(struct test_spec *test)
+{
+	test->ifobj_rx->use_poll = true;
+	return testapp_validate_traffic(test);
+}
+
+static int testapp_poll_tx(struct test_spec *test)
+{
+	test->ifobj_tx->use_poll = true;
+	return testapp_validate_traffic(test);
+}
+
+static int testapp_aligned_inv_desc(struct test_spec *test)
+{
+	return testapp_invalid_desc(test);
+}
+
+static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
+{
+	test->ifobj_tx->umem->frame_size = 2048;
+	test->ifobj_rx->umem->frame_size = 2048;
+	return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc(struct test_spec *test)
+{
+	test->ifobj_tx->umem->unaligned_mode = true;
+	test->ifobj_rx->umem->unaligned_mode = true;
+	return testapp_invalid_desc(test);
+}
+
+static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
+{
+	u64 page_size, umem_size;
+
+	/* Odd frame size so the UMEM doesn't end near a page boundary. */
+	test->ifobj_tx->umem->frame_size = 4001;
+	test->ifobj_rx->umem->frame_size = 4001;
+	test->ifobj_tx->umem->unaligned_mode = true;
+	test->ifobj_rx->umem->unaligned_mode = true;
+	/* This test exists to test descriptors that staddle the end of
+	 * the UMEM but not a page.
+	 */
+	page_size = sysconf(_SC_PAGESIZE);
+	umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+	assert(umem_size % page_size > MIN_PKT_SIZE);
+	assert(umem_size % page_size < page_size - MIN_PKT_SIZE);
+
+	return testapp_invalid_desc(test);
+}
+
+static int testapp_aligned_inv_desc_mb(struct test_spec *test)
+{
+	return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
+{
+	test->ifobj_tx->umem->unaligned_mode = true;
+	test->ifobj_rx->umem->unaligned_mode = true;
+	return testapp_invalid_desc_mb(test);
+}
+
+static int testapp_xdp_metadata(struct test_spec *test)
+{
+	return testapp_xdp_metadata_copy(test);
+}
+
+static int testapp_xdp_metadata_mb(struct test_spec *test)
+{
+	test->mtu = MAX_ETH_JUMBO_SIZE;
+	return testapp_xdp_metadata_copy(test);
+}
+
+static void run_pkt_test(struct test_spec *test)
+{
+	int ret;
+
+	ret = test->test_func(test);
 
 	if (ret == TEST_PASS)
 		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
@@ -2290,13 +2313,55 @@ static bool is_xdp_supported(int ifindex)
 	return true;
 }
 
+static const struct test_spec tests[] = {
+	{.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
+	{.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
+	{.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
+	{.name = "POLL_RX", .test_func = testapp_poll_rx},
+	{.name = "POLL_TX", .test_func = testapp_poll_tx},
+	{.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
+	{.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
+	{.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
+	{.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
+	{.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
+	{.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
+	{.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
+	 .test_func = testapp_unaligned_inv_desc_4001_frame},
+	{.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
+	{.name = "TEARDOWN", .test_func = testapp_teardown},
+	{.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
+	{.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
+	{.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
+	{.name = "STAT_RX_FULL", .test_func = testapp_stats_rx_full},
+	{.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty},
+	{.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup},
+	{.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop},
+	{.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata},
+	{.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb},
+	{.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb},
+	{.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS",
+	 .test_func = testapp_send_receive_unaligned_mb},
+	{.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb},
+	{.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb},
+	{.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags},
+};
+
+static void print_tests(void)
+{
+	u32 i;
+
+	printf("Tests:\n");
+	for (i = 0; i < ARRAY_SIZE(tests); i++)
+		printf("%u: %s\n", i, tests[i].name);
+}
+
 int main(int argc, char **argv)
 {
 	struct pkt_stream *rx_pkt_stream_default;
 	struct pkt_stream *tx_pkt_stream_default;
 	struct ifobject *ifobj_tx, *ifobj_rx;
+	u32 i, j, failed_tests = 0, nb_tests;
 	int modes = TEST_MODE_SKB + 1;
-	u32 i, j, failed_tests = 0;
 	struct test_spec test;
 	bool shared_netdev;
 
@@ -2314,14 +2379,21 @@ int main(int argc, char **argv)
 
 	parse_command_line(ifobj_tx, ifobj_rx, argc, argv);
 
+	if (opt_print_tests) {
+		print_tests();
+		ksft_exit_xpass();
+	}
+	if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) {
+		ksft_print_msg("Error: test %u does not exist.\n", opt_run_test);
+		ksft_exit_xfail();
+	}
+
 	shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex);
 	ifobj_tx->shared_umem = shared_netdev;
 	ifobj_rx->shared_umem = shared_netdev;
 
-	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) {
-		usage(basename(argv[0]));
-		ksft_exit_xfail();
-	}
+	if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx))
+		print_usage(argv);
 
 	if (is_xdp_supported(ifobj_tx->ifindex)) {
 		modes++;
@@ -2332,7 +2404,7 @@ int main(int argc, char **argv)
 	init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx);
 	init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx);
 
-	test_spec_init(&test, ifobj_tx, ifobj_rx, 0);
+	test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]);
 	tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
 	rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
 	if (!tx_pkt_stream_default || !rx_pkt_stream_default)
@@ -2340,12 +2412,35 @@ int main(int argc, char **argv)
 	test.tx_pkt_stream_default = tx_pkt_stream_default;
 	test.rx_pkt_stream_default = rx_pkt_stream_default;
 
-	ksft_set_plan(modes * TEST_TYPE_MAX);
+	if (opt_run_test == RUN_ALL_TESTS)
+		nb_tests = ARRAY_SIZE(tests);
+	else
+		nb_tests = 1;
+	if (opt_mode == TEST_MODE_ALL) {
+		ksft_set_plan(modes * nb_tests);
+	} else {
+		if (opt_mode == TEST_MODE_DRV && modes <= TEST_MODE_DRV) {
+			ksft_print_msg("Error: XDP_DRV mode not supported.\n");
+			ksft_exit_xfail();
+		}
+		if (opt_mode == TEST_MODE_ZC && modes <= TEST_MODE_ZC) {
+			ksft_print_msg("Error: zero-copy mode not supported.\n");
+			ksft_exit_xfail();
+		}
+
+		ksft_set_plan(nb_tests);
+	}
 
 	for (i = 0; i < modes; i++) {
-		for (j = 0; j < TEST_TYPE_MAX; j++) {
-			test_spec_init(&test, ifobj_tx, ifobj_rx, i);
-			run_pkt_test(&test, i, j);
+		if (opt_mode != TEST_MODE_ALL && i != opt_mode)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(tests); j++) {
+			if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
+				continue;
+
+			test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
+			run_pkt_test(&test);
 			usleep(USLEEP_MAX);
 
 			if (test.fail)
@@ -5,6 +5,8 @@
 #ifndef XSKXCEIVER_H_
 #define XSKXCEIVER_H_
 
+#include <limits.h>
+
 #include "xsk_xdp_progs.skel.h"
 
 #ifndef SOL_XDP
@@ -34,7 +36,7 @@
 #define MAX_INTERFACES 2
 #define MAX_INTERFACE_NAME_CHARS 16
 #define MAX_SOCKETS 2
-#define MAX_TEST_NAME_SIZE 32
+#define MAX_TEST_NAME_SIZE 48
 #define MAX_TEARDOWN_ITER 10
 #define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */
 #define MIN_PKT_SIZE 64
@@ -56,6 +58,7 @@
 #define XSK_DESC__MAX_SKB_FRAGS 18
 #define HUGEPAGE_SIZE (2 * 1024 * 1024)
 #define PKT_DUMP_NB_TO_PRINT 16
+#define RUN_ALL_TESTS UINT_MAX
 
 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
 
@@ -63,43 +66,9 @@ enum test_mode {
 	TEST_MODE_SKB,
 	TEST_MODE_DRV,
 	TEST_MODE_ZC,
-	TEST_MODE_MAX
+	TEST_MODE_ALL
 };
 
-enum test_type {
-	TEST_TYPE_RUN_TO_COMPLETION,
-	TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME,
-	TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT,
-	TEST_TYPE_RX_POLL,
-	TEST_TYPE_TX_POLL,
-	TEST_TYPE_POLL_RXQ_TMOUT,
-	TEST_TYPE_POLL_TXQ_TMOUT,
-	TEST_TYPE_UNALIGNED,
-	TEST_TYPE_ALIGNED_INV_DESC,
-	TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
-	TEST_TYPE_UNALIGNED_INV_DESC,
-	TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME,
-	TEST_TYPE_HEADROOM,
-	TEST_TYPE_TEARDOWN,
-	TEST_TYPE_BIDI,
-	TEST_TYPE_STATS_RX_DROPPED,
-	TEST_TYPE_STATS_TX_INVALID_DESCS,
-	TEST_TYPE_STATS_RX_FULL,
-	TEST_TYPE_STATS_FILL_EMPTY,
-	TEST_TYPE_BPF_RES,
-	TEST_TYPE_XDP_DROP_HALF,
-	TEST_TYPE_XDP_METADATA_COUNT,
-	TEST_TYPE_XDP_METADATA_COUNT_MB,
-	TEST_TYPE_RUN_TO_COMPLETION_MB,
-	TEST_TYPE_UNALIGNED_MB,
-	TEST_TYPE_ALIGNED_INV_DESC_MB,
-	TEST_TYPE_UNALIGNED_INV_DESC_MB,
-	TEST_TYPE_TOO_MANY_FRAGS,
-	TEST_TYPE_MAX
-};
-
-static bool opt_verbose;
-
 struct xsk_umem_info {
 	struct xsk_ring_prod fq;
 	struct xsk_ring_cons cq;
@@ -139,8 +108,10 @@ struct pkt_stream {
 };
 
 struct ifobject;
+struct test_spec;
 typedef int (*validation_func_t)(struct ifobject *ifobj);
 typedef void *(*thread_func_t)(void *arg);
+typedef int (*test_func_t)(struct test_spec *test);
 
 struct ifobject {
 	char ifname[MAX_INTERFACE_NAME_CHARS];
@@ -182,6 +153,7 @@ struct test_spec {
 	struct bpf_program *xdp_prog_tx;
 	struct bpf_map *xskmap_rx;
 	struct bpf_map *xskmap_tx;
+	test_func_t test_func;
 	int mtu;
 	u16 total_steps;
 	u16 current_step;