diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 93060283b6fd..2478cef758f8 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -835,7 +835,7 @@ section named by ``btf_ext_info_sec->sec_name_off``. See :ref:`Documentation/bpf/llvm_reloc.rst ` for more information on CO-RE relocations. -4.2 .BTF_ids section +4.3 .BTF_ids section -------------------- The .BTF_ids section encodes BTF ID values that are used within the kernel. @@ -896,6 +896,81 @@ and is used as a filter when resolving the BTF ID value. All the BTF ID lists and sets are compiled in the .BTF_ids section and resolved during the linking phase of kernel build by ``resolve_btfids`` tool. +4.4 .BTF.base section +--------------------- +Split BTF - where the .BTF section only contains types not in the associated +base .BTF section - is an extremely efficient way to encode type information +for kernel modules, since they generally consist of a few module-specific +types along with a large set of shared kernel types. The former are encoded +in split BTF, while the latter are encoded in base BTF, resulting in more +compact representations. A type in split BTF that refers to a type in +base BTF does so using the base BTF ID, and split BTF IDs start +at last_base_BTF_ID + 1. + +The downside of this approach, however, is that it makes the split BTF +somewhat brittle - when the base BTF changes, base BTF ID references are +no longer valid and the split BTF itself becomes useless. The role of the +.BTF.base section is to make split BTF more resilient for cases where +the base BTF may change, as is the case, for example, for kernel modules +that are not rebuilt every time the kernel is. .BTF.base contains named base types: INTs, +FLOATs, STRUCTs, UNIONs, ENUM[64]s and FWDs. INTs and FLOATs are fully +described in .BTF.base sections, while composite types like structs +and unions are not fully defined - the .BTF.base type simply serves as +a description of the type the split BTF referred to, so structs/unions +have 0 members in the .BTF.base section. ENUM[64]s are similarly recorded +with 0 members. Any other types are added to the split BTF. This +distillation process leaves us with a .BTF.base section containing +such minimal descriptions of base types and a split .BTF section which refers +to those base types. Later, we can relocate the split BTF using both the +information stored in the .BTF.base section and the new .BTF base; the type +information in the .BTF.base section allows us to update the split BTF +references to point at the corresponding new base BTF IDs. + +BTF relocation happens on kernel module load when a kernel module has a +.BTF.base section, and libbpf also provides a btf__relocate() API to +accomplish this. + +As an example, consider the following base BTF:: + + [1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED + [2] STRUCT 'foo' size=8 vlen=2 + 'f1' type_id=1 bits_offset=0 + 'f2' type_id=1 bits_offset=32 + +...and associated split BTF:: + + [3] PTR '(anon)' type_id=2 + +i.e.
split BTF describes a pointer to struct foo { int f1; int f2; }; + +.BTF.base will consist of:: + + [1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED + [2] STRUCT 'foo' size=8 vlen=0 + +If we relocate the split BTF later using the following new base BTF:: + + [1] INT 'long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none) + [2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED + [3] STRUCT 'foo' size=8 vlen=2 + 'f1' type_id=2 bits_offset=0 + 'f2' type_id=2 bits_offset=32 + +...we can use our .BTF.base description to know that the split BTF reference +is to struct foo, and relocation results in the new split BTF:: + + [4] PTR '(anon)' type_id=3 + +Note that both the BTF ID of the split type and the start BTF ID of the split +BTF had to be updated, since the new base BTF contains one more type. + +The .BTF.base section thus facilitates later relocation, leading to more +resilient split BTF. + +.BTF.base sections will be generated automatically for out-of-tree kernel module +builds - i.e. where KBUILD_EXTMOD is set (as it would be for "make M=path/2/mod" +cases). .BTF.base generation requires pahole support for the "distilled_base" +BTF feature; this is available in pahole v1.28 and later. + 5. Using BTF ============ diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst index d23761540002..95e6f80a407e 100644 --- a/Documentation/bpf/verifier.rst +++ b/Documentation/bpf/verifier.rst @@ -507,7 +507,7 @@ Notes: from the parent state to the current state. * Details about REG_LIVE_READ32 are omitted. - + * Function ``propagate_liveness()`` (see section :ref:`read_marks_for_cache_hits`) might override the first parent link. Please refer to the comments in the ``propagate_liveness()`` and ``mark_reg_read()`` source code for further @@ -571,7 +571,7 @@ works:: are considered equivalent. .. _read_marks_for_cache_hits: - + Read marks propagation for cache hits ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 5db82bfc9dc1..27ef366363e4 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2094,6 +2094,12 @@ static void restore_args(struct jit_ctx *ctx, int args_off, int nregs) } } +static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links) +{ + return fentry_links->nr_links == 1 && + fentry_links->links[0]->link.type == BPF_LINK_TYPE_STRUCT_OPS; +} + /* Based on the x86's implementation of arch_prepare_bpf_trampoline().
* * bpf prog and function entry before bpf trampoline hooked: @@ -2123,6 +2129,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; bool save_ret; __le32 **branches = NULL; + bool is_struct_ops = is_struct_ops_tramp(fentry); /* trampoline stack layout: * [ parent ip ] @@ -2191,11 +2198,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, */ emit_bti(A64_BTI_JC, ctx); - /* frame for parent function */ - emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx); - emit(A64_MOV(1, A64_FP, A64_SP), ctx); + /* x9 is not set for struct_ops */ + if (!is_struct_ops) { + /* frame for parent function */ + emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx); + emit(A64_MOV(1, A64_FP, A64_SP), ctx); + } - /* frame for patched function */ + /* frame for patched function for tracing, or caller for struct_ops */ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); @@ -2289,19 +2299,24 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, /* reset SP */ emit(A64_MOV(1, A64_SP, A64_FP), ctx); - /* pop frames */ - emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); - emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx); - - if (flags & BPF_TRAMP_F_SKIP_FRAME) { - /* skip patched function, return to parent */ - emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); - emit(A64_RET(A64_R(9)), ctx); + if (is_struct_ops) { + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_RET(A64_LR), ctx); } else { - /* return to patched function */ - emit(A64_MOV(1, A64_R(10), A64_LR), ctx); - emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); - emit(A64_RET(A64_R(10)), ctx); + /* pop frames */ + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx); + + if (flags & BPF_TRAMP_F_SKIP_FRAME) { + /* skip patched function, return to parent */ + emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); + emit(A64_RET(A64_R(9)), ctx); + } else { + /* return to patched function */ + emit(A64_MOV(1, A64_R(10), A64_LR), ctx); + emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); + emit(A64_RET(A64_R(10)), ctx); + } } kfree(branches); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 06b080b61aa5..a43fc5af973d 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -325,6 +325,22 @@ struct jit_context { /* Number of bytes that will be skipped on tailcall */ #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) +static void push_r9(u8 **pprog) +{ + u8 *prog = *pprog; + + EMIT2(0x41, 0x51); /* push r9 */ + *pprog = prog; +} + +static void pop_r9(u8 **pprog) +{ + u8 *prog = *pprog; + + EMIT2(0x41, 0x59); /* pop r9 */ + *pprog = prog; +} + static void push_r12(u8 **pprog) { u8 *prog = *pprog; @@ -1404,6 +1420,24 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) *pprog = prog; } +static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr) +{ + u8 *prog = *pprog; + + /* movabs r9, priv_frame_ptr */ + emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32, + (u32) (__force long) priv_frame_ptr); + +#ifdef CONFIG_SMP + /* add , gs:[] */ + EMIT2(0x65, 0x4c); + EMIT3(0x03, 0x0c, 0x25); + EMIT((u32)(unsigned long)&this_cpu_off, 4); +#endif + + *pprog = prog; +} + #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) #define __LOAD_TCC_PTR(off) \ @@ -1412,6 +1446,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) #define 
LOAD_TAIL_CALL_CNT_PTR(stack) \ __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) +/* Memory size/value to protect private stack overflow/underflow */ +#define PRIV_STACK_GUARD_SZ 8 +#define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) { @@ -1421,18 +1459,28 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image int insn_cnt = bpf_prog->len; bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; + void __percpu *priv_frame_ptr = NULL; u64 arena_vm_start, user_vm_start; + void __percpu *priv_stack_ptr; int i, excnt = 0; int ilen, proglen = 0; u8 *prog = temp; + u32 stack_depth; int err; + stack_depth = bpf_prog->aux->stack_depth; + priv_stack_ptr = bpf_prog->aux->priv_stack_ptr; + if (priv_stack_ptr) { + priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8); + stack_depth = 0; + } + arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); detect_reg_usage(insn, insn_cnt, callee_regs_used); - emit_prologue(&prog, bpf_prog->aux->stack_depth, + emit_prologue(&prog, stack_depth, bpf_prog_was_classic(bpf_prog), tail_call_reachable, bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb); /* Exception callback will clobber callee regs for its own use, and @@ -1454,6 +1502,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image emit_mov_imm64(&prog, X86_REG_R12, arena_vm_start >> 32, (u32) arena_vm_start); + if (priv_frame_ptr) + emit_priv_frame_ptr(&prog, priv_frame_ptr); + ilen = prog - temp; if (rw_image) memcpy(rw_image + proglen, temp, ilen); @@ -1473,6 +1524,14 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image u8 *func; int nops; + if (priv_frame_ptr) { + if (src_reg == BPF_REG_FP) + src_reg = X86_REG_R9; + + if (dst_reg == BPF_REG_FP) + dst_reg = X86_REG_R9; + } + switch (insn->code) { /* ALU */ case BPF_ALU | BPF_ADD | BPF_X: @@ -2127,15 +2186,21 @@ st: if (is_imm8(insn->off)) u8 *ip = image + addrs[i - 1]; func = (u8 *) __bpf_call_base + imm32; - if (tail_call_reachable) { - LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); + if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { + LOAD_TAIL_CALL_CNT_PTR(stack_depth); ip += 7; } if (!imm32) return -EINVAL; + if (priv_frame_ptr) { + push_r9(&prog); + ip += 2; + } ip += x86_call_depth_emit_accounting(&prog, func, ip); if (emit_call(&prog, func, ip)) return -EINVAL; + if (priv_frame_ptr) + pop_r9(&prog); break; } @@ -2145,13 +2210,13 @@ st: if (is_imm8(insn->off)) &bpf_prog->aux->poke_tab[imm32 - 1], &prog, image + addrs[i - 1], callee_regs_used, - bpf_prog->aux->stack_depth, + stack_depth, ctx); else emit_bpf_tail_call_indirect(bpf_prog, &prog, callee_regs_used, - bpf_prog->aux->stack_depth, + stack_depth, image + addrs[i - 1], ctx); break; @@ -3303,6 +3368,42 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); } +static const char *bpf_get_prog_name(struct bpf_prog *prog) +{ + if (prog->aux->ksym.prog) + return prog->aux->ksym.name; + return prog->aux->name; +} + +static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size) +{ + int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; + u64 *stack_ptr; + + for_each_possible_cpu(cpu) { + stack_ptr = 
per_cpu_ptr(priv_stack_ptr, cpu); + stack_ptr[0] = PRIV_STACK_GUARD_VAL; + stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL; + } +} + +static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size, + struct bpf_prog *prog) +{ + int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; + u64 *stack_ptr; + + for_each_possible_cpu(cpu) { + stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu); + if (stack_ptr[0] != PRIV_STACK_GUARD_VAL || + stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) { + pr_err("BPF private stack overflow/underflow detected for prog %sx\n", + bpf_get_prog_name(prog)); + break; + } + } +} + struct x64_jit_data { struct bpf_binary_header *rw_header; struct bpf_binary_header *header; @@ -3320,7 +3421,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) struct bpf_binary_header *rw_header = NULL; struct bpf_binary_header *header = NULL; struct bpf_prog *tmp, *orig_prog = prog; + void __percpu *priv_stack_ptr = NULL; struct x64_jit_data *jit_data; + int priv_stack_alloc_sz; int proglen, oldproglen = 0; struct jit_context ctx = {}; bool tmp_blinded = false; @@ -3356,6 +3459,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } prog->aux->jit_data = jit_data; } + priv_stack_ptr = prog->aux->priv_stack_ptr; + if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) { + /* Allocate actual private stack size with verifier-calculated + * stack size plus two memory guards to protect overflow and + * underflow. + */ + priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + + 2 * PRIV_STACK_GUARD_SZ; + priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL); + if (!priv_stack_ptr) { + prog = orig_prog; + goto out_priv_stack; + } + + priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz); + prog->aux->priv_stack_ptr = priv_stack_ptr; + } addrs = jit_data->addrs; if (addrs) { ctx = jit_data->ctx; @@ -3491,6 +3611,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_prog_fill_jited_linfo(prog, addrs + 1); out_addrs: kvfree(addrs); + if (!image && priv_stack_ptr) { + free_percpu(priv_stack_ptr); + prog->aux->priv_stack_ptr = NULL; + } +out_priv_stack: kfree(jit_data); prog->aux->jit_data = NULL; } @@ -3529,6 +3654,8 @@ void bpf_jit_free(struct bpf_prog *prog) if (prog->jited) { struct x64_jit_data *jit_data = prog->aux->jit_data; struct bpf_binary_header *hdr; + void __percpu *priv_stack_ptr; + int priv_stack_alloc_sz; /* * If we fail the final pass of JIT (from jit_subprogs), @@ -3544,6 +3671,13 @@ void bpf_jit_free(struct bpf_prog *prog) prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); hdr = bpf_jit_binary_pack_hdr(prog); bpf_jit_binary_pack_free(hdr, NULL); + priv_stack_ptr = prog->aux->priv_stack_ptr; + if (priv_stack_ptr) { + priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + + 2 * PRIV_STACK_GUARD_SZ; + priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog); + free_percpu(prog->aux->priv_stack_ptr); + } WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); } @@ -3559,6 +3693,11 @@ bool bpf_jit_supports_exceptions(void) return IS_ENABLED(CONFIG_UNWINDER_ORC); } +bool bpf_jit_supports_private_stack(void) +{ + return true; +} + void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) { #if defined(CONFIG_UNWINDER_ORC) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bdadb0bb6cec..3ace0d6227e3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -203,6 +203,7 @@ enum btf_field_type { BPF_GRAPH_ROOT = BPF_RB_ROOT | 
BPF_LIST_HEAD, BPF_REFCOUNT = (1 << 9), BPF_WORKQUEUE = (1 << 10), + BPF_UPTR = (1 << 11), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -322,6 +323,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "kptr"; case BPF_KPTR_PERCPU: return "percpu_kptr"; + case BPF_UPTR: + return "uptr"; case BPF_LIST_HEAD: return "bpf_list_head"; case BPF_LIST_NODE: @@ -350,6 +353,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return sizeof(u64); case BPF_LIST_HEAD: return sizeof(struct bpf_list_head); @@ -379,6 +383,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return __alignof__(u64); case BPF_LIST_HEAD: return __alignof__(struct bpf_list_head); @@ -419,6 +424,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: break; default: WARN_ON_ONCE(1); @@ -507,6 +513,25 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src bpf_obj_memcpy(map->record, dst, src, map->value_size, true); } +static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) +{ + unsigned long *src_uptr, *dst_uptr; + const struct btf_field *field; + int i; + + if (!btf_record_has_field(rec, BPF_UPTR)) + return; + + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { + if (field->type != BPF_UPTR) + continue; + + src_uptr = src + field->offset; + dst_uptr = dst + field->offset; + swap(*src_uptr, *dst_uptr); + } +} + static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) { u32 curr_off = 0; @@ -907,10 +932,6 @@ enum bpf_reg_type { * additional context, assume the value is non-null. */ PTR_TO_BTF_ID, - /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not - * been checked for null. Used primarily to inform the verifier - * an explicit null check is required for this struct. - */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_ARENA, PTR_TO_BUF, /* reg points to a read/write buffer */ @@ -923,6 +944,10 @@ enum bpf_reg_type { PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, + /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not + * been checked for null. Used primarily to inform the verifier + * an explicit null check is required for this struct. + */ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, /* This must be the last entry. 
Its purpose is to ensure the enum is @@ -1300,8 +1325,12 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); #ifdef CONFIG_BPF_JIT -int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); -int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); @@ -1373,7 +1402,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); /* Called only from JIT-enabled code, so there's no need for stubs. */ -void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_add(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); @@ -1382,12 +1412,14 @@ void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } @@ -1476,6 +1508,7 @@ struct bpf_prog_aux { u32 max_rdwr_access; struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; + void __percpu *priv_stack_ptr; struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ struct bpf_prog *dst_prog; struct bpf_trampoline *dst_trampoline; @@ -1491,7 +1524,13 @@ struct bpf_prog_aux { bool xdp_has_frags; bool exception_cb; bool exception_boundary; + bool is_extended; /* true if extended by freplace program */ + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ + struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -3461,4 +3500,10 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog) return prog->aux->func_idx != 0; } +static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog) +{ + return prog->type == BPF_PROG_TYPE_TRACING && + prog->expected_attach_type == BPF_TRACE_RAW_TP; +} + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index dcddb0aef7d8..ab7244d8108f 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -77,7 +77,13 @@ struct bpf_local_storage_elem { struct hlist_node map_node; /* Linked to bpf_local_storage_map */ struct hlist_node snode; /* Linked to bpf_local_storage */ 
struct bpf_local_storage __rcu *local_storage; - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct hlist_node free_node; /* used to postpone + * bpf_selem_free + * after raw_spin_unlock + */ + }; /* 8 bytes hole */ /* The data is stored in another cacheline to minimize * the number of cachelines access during a cache hit. @@ -181,7 +187,7 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap, struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem, gfp_t gfp_flags); + bool charge_mem, bool swap_uptrs, gfp_t gfp_flags); void bpf_selem_free(struct bpf_local_storage_elem *selem, struct bpf_local_storage_map *smap, @@ -195,7 +201,7 @@ bpf_local_storage_alloc(void *owner, struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags, gfp_t gfp_flags); + void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags); u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 4513372c5bc8..f4290c179bee 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -48,22 +48,6 @@ enum bpf_reg_liveness { REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; -/* For every reg representing a map value or allocated object pointer, - * we consider the tuple of (ptr, id) for them to be unique in verifier - * context and conside them to not alias each other for the purposes of - * tracking lock state. - */ -struct bpf_active_lock { - /* This can either be reg->map_ptr or reg->btf. If ptr is NULL, - * there's no active lock held, and other fields have no - * meaning. If non-NULL, it indicates that a lock is held and - * id member has the reg->id of the register which can be >= 0. - */ - void *ptr; - /* This will be reg->id */ - u32 id; -}; - #define ITER_PREFIX "bpf_iter_" enum bpf_iter_state { @@ -266,6 +250,13 @@ struct bpf_stack_state { }; struct bpf_reference_state { + /* Each reference object has a type. Ensure REF_TYPE_PTR is zero to + * default to pointer reference on zero initialization of a state. + */ + enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK, + } type; /* Track each reference created with a unique id, even if the same * instruction creates the reference multiple times (eg, via CALL). */ @@ -274,17 +265,10 @@ struct bpf_reference_state { * is used purely to inform the user of a reference leak. */ int insn_idx; - /* There can be a case like: - * main (frame 0) - * cb (frame 1) - * func (frame 3) - * cb (frame 4) - * Hence for frame 4, if callback_ref just stored boolean, it would be - * impossible to distinguish nested callback refs. Hence store the - * frameno and compare that to callback_ref in check_reference_leak when - * exiting a callback function. + /* Use to keep track of the source object of a lock, to ensure + * it matches on unlock. */ - int callback_ref; + void *ptr; }; struct bpf_retval_range { @@ -332,6 +316,7 @@ struct bpf_func_state { /* The following fields should be last. See copy_func_state() */ int acquired_refs; + int active_locks; struct bpf_reference_state *refs; /* The state of the stack. Each element of the array describes BPF_REG_SIZE * (i.e. 8) bytes worth of stack memory. 
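[Editor's illustration, not part of the series: the new REF_TYPE_LOCK reference entries and the active_locks counter above describe the usual spin lock pattern on a map value; the added ->ptr member is what lets the verifier check that the unlock happens on the same object that was locked. A minimal BPF program sketch of that pattern:]

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct val {
            int counter;
            struct bpf_spin_lock lock;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, struct val);
    } counters SEC(".maps");

    SEC("tc")
    int bump(struct __sk_buff *skb)
    {
            struct val *v;
            int key = 0;

            v = bpf_map_lookup_elem(&counters, &key);
            if (!v)
                    return 0;

            bpf_spin_lock(&v->lock);   /* verifier records the lock and its source object */
            v->counter++;
            bpf_spin_unlock(&v->lock); /* must be released on the same object before exit */
            return 0;
    }

    char _license[] SEC("license") = "GPL";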
@@ -349,7 +334,7 @@ struct bpf_func_state { #define MAX_CALL_FRAMES 8 -/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +/* instruction history flags, used in bpf_insn_hist_entry.flags field */ enum { /* instruction references stack slot through PTR_TO_STACK register; * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) @@ -367,7 +352,7 @@ enum { static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); -struct bpf_jmp_history_entry { +struct bpf_insn_hist_entry { u32 idx; /* insn idx can't be bigger than 1 million */ u32 prev_idx : 22; @@ -434,7 +419,6 @@ struct bpf_verifier_state { u32 insn_idx; u32 curframe; - struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; u32 active_preempt_lock; @@ -458,13 +442,14 @@ struct bpf_verifier_state { * See get_loop_entry() for more information. */ struct bpf_verifier_state *loop_entry; - /* jmp history recorded from first to last. - * backtracking is using it to go from last to first. - * For most states jmp_history_cnt is [0-3]. + /* Sub-range of env->insn_hist[] corresponding to this state's + * instruction history. + * Backtracking is using it to go from last to first. + * For most states instruction history is short, 0-3 instructions. * For loops can go up to ~40. */ - struct bpf_jmp_history_entry *jmp_history; - u32 jmp_history_cnt; + u32 insn_hist_start; + u32 insn_hist_end; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; @@ -649,6 +634,12 @@ struct bpf_subprog_arg_info { }; }; +enum priv_stack_mode { + PRIV_STACK_UNKNOWN, + NO_PRIV_STACK, + PRIV_STACK_ADAPTIVE, +}; + struct bpf_subprog_info { /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ @@ -669,6 +660,7 @@ struct bpf_subprog_info { /* true if bpf_fastcall stack region is used by functions that can't be inlined */ bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; }; @@ -747,7 +739,9 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; - struct bpf_jmp_history_entry *cur_hist_ent; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ @@ -888,6 +882,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) case BPF_PROG_TYPE_TRACING: return prog->expected_attach_type != BPF_TRACE_ITER; case BPF_PROG_TYPE_STRUCT_OPS: + return prog->aux->jits_use_priv_stack; case BPF_PROG_TYPE_LSM: return false; default: diff --git a/include/linux/btf.h b/include/linux/btf.h index b8a583194c4a..4214e76c9168 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -75,6 +75,7 @@ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ #define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */ +#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the @@ -581,6 +582,16 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty bool btf_types_are_same(const struct btf *btf1, u32 id1, const struct btf *btf2, u32 id2); int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx); + +static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) +{ + if (!btf_type_is_ptr(t)) + return false; + + t = btf_type_skip_modifiers(btf, t->type, NULL); + + return btf_type_is_struct(t); +} #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -660,15 +671,4 @@ static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *fun return -EOPNOTSUPP; } #endif - -static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) -{ - if (!btf_type_is_ptr(t)) - return false; - - t = btf_type_skip_modifiers(btf, t->type, NULL); - - return btf_type_is_struct(t); -} - #endif diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index c0e3e1426a82..139bdececdcf 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[]; extern u32 bpf_cgroup_btf_id[]; extern u32 bpf_local_storage_map_btf_id[]; extern u32 btf_bpf_map_id[]; +extern u32 bpf_kmem_cache_btf_id[]; #endif diff --git a/include/linux/filter.h b/include/linux/filter.h index 7d7578a8eac1..3a21947f2fd4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1119,6 +1119,7 @@ bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); +bool bpf_jit_supports_private_stack(void); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4a939c90dc2e..4162afc6b5d0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1116,6 +1116,7 @@ enum bpf_attach_type { BPF_NETKIT_PRIMARY, BPF_NETKIT_PEER, BPF_TRACE_KPROBE_SESSION, + BPF_TRACE_UPROBE_SESSION, __MAX_BPF_ATTACH_TYPE }; @@ -1973,6 +1974,8 @@ union bpf_attr { * program. * Return * The SMP id of the processor running the program. + * Attributes + * __bpf_fastcall * * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description @@ -3104,10 +3107,6 @@ union bpf_attr { * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. - * - * Also, the helper is only available for the architectures having - * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, - * x86 architecture is the only one to support this feature. * Return * 0 * @@ -5372,7 +5371,7 @@ union bpf_attr { * Currently, the **flags** must be 0. Currently, nr_loops is * limited to 1 << 23 (~8 million) loops. * - * long (\*callback_fn)(u32 index, void \*ctx); + * long (\*callback_fn)(u64 index, void \*ctx); * * where **index** is the current index in the loop. The index * is zero-indexed. 
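[Editor's aside on the helper documentation fix above (the bpf_loop() callback index is a u64, not a u32): a minimal sketch of a conforming callback, assuming the usual libbpf program conventions:]

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* matches the corrected prototype: long (*callback_fn)(u64 index, void *ctx) */
    static long sum_cb(u64 index, void *ctx)
    {
            u64 *sum = ctx;

            *sum += index;
            return 0;       /* 0 continues the loop, 1 stops it */
    }

    SEC("tc")
    int run_loop(struct __sk_buff *skb)
    {
            u64 sum = 0;

            /* nr_loops is limited to 1 << 23; flags must currently be 0 */
            bpf_loop(8, sum_cb, &sum, 0);
            return 0;
    }

    char _license[] SEC("license") = "GPL";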
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 9b9c151b5c82..9762bdddf1de 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -16,7 +16,7 @@ obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o obj-$(CONFIG_BPF_JIT) += trampoline.o obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o ifeq ($(CONFIG_MMU)$(CONFIG_64BIT),yy) -obj-$(CONFIG_BPF_SYSCALL) += arena.o +obj-$(CONFIG_BPF_SYSCALL) += arena.o range_tree.o endif obj-$(CONFIG_BPF_JIT) += dispatcher.o ifeq ($(CONFIG_NET),y) @@ -52,3 +52,4 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o +obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c index e52b3ad231b9..945a5680f6a5 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -3,9 +3,11 @@ #include #include #include +#include "linux/filter.h" #include #include #include +#include "range_tree.h" /* * bpf_arena is a sparsely populated shared memory region between bpf program and @@ -45,7 +47,7 @@ struct bpf_arena { u64 user_vm_start; u64 user_vm_end; struct vm_struct *kern_vm; - struct maple_tree mt; + struct range_tree rt; struct list_head vma_list; struct mutex lock; }; @@ -98,6 +100,9 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr) u64 vm_range; int err = -ENOMEM; + if (!bpf_jit_supports_arena()) + return ERR_PTR(-EOPNOTSUPP); + if (attr->key_size || attr->value_size || attr->max_entries == 0 || /* BPF_F_MMAPABLE must be set */ !(attr->map_flags & BPF_F_MMAPABLE) || @@ -132,7 +137,8 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr) INIT_LIST_HEAD(&arena->vma_list); bpf_map_init_from_attr(&arena->map, attr); - mt_init_flags(&arena->mt, MT_FLAGS_ALLOC_RANGE); + range_tree_init(&arena->rt); + range_tree_set(&arena->rt, 0, attr->max_entries); mutex_init(&arena->lock); return &arena->map; @@ -183,7 +189,7 @@ static void arena_map_free(struct bpf_map *map) apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena), KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL); free_vm_area(arena->kern_vm); - mtree_destroy(&arena->mt); + range_tree_destroy(&arena->rt); bpf_map_area_free(arena); } @@ -274,20 +280,20 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf) /* User space requested to segfault when page is not allocated by bpf prog */ return VM_FAULT_SIGSEGV; - ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL); + ret = range_tree_clear(&arena->rt, vmf->pgoff, 1); if (ret) return VM_FAULT_SIGSEGV; /* Account into memcg of the process that created bpf_arena */ ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page); if (ret) { - mtree_erase(&arena->mt, vmf->pgoff); + range_tree_set(&arena->rt, vmf->pgoff, 1); return VM_FAULT_SIGSEGV; } ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page); if (ret) { - mtree_erase(&arena->mt, vmf->pgoff); + range_tree_set(&arena->rt, vmf->pgoff, 1); __free_page(page); return VM_FAULT_SIGSEGV; } @@ -444,12 +450,16 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt guard(mutex)(&arena->lock); - if (uaddr) - ret = mtree_insert_range(&arena->mt, pgoff, pgoff + page_cnt - 1, - MT_ENTRY, GFP_KERNEL); - else - ret = mtree_alloc_range(&arena->mt, &pgoff, MT_ENTRY, - page_cnt, 0, page_cnt_max - 1, GFP_KERNEL); + if (uaddr) { + ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); + if (ret) + goto out_free_pages; + ret = range_tree_clear(&arena->rt, pgoff, 
page_cnt); + } else { + ret = pgoff = range_tree_find(&arena->rt, page_cnt); + if (pgoff >= 0) + ret = range_tree_clear(&arena->rt, pgoff, page_cnt); + } if (ret) goto out_free_pages; @@ -476,7 +486,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt kvfree(pages); return clear_lo32(arena->user_vm_start) + uaddr32; out: - mtree_erase(&arena->mt, pgoff); + range_tree_set(&arena->rt, pgoff, page_cnt); out_free_pages: kvfree(pages); return 0; @@ -516,7 +526,7 @@ static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt) pgoff = compute_pgoff(arena, uaddr); /* clear range */ - mtree_store_range(&arena->mt, pgoff, pgoff + page_cnt - 1, NULL, GFP_KERNEL); + range_tree_set(&arena->rt, pgoff, page_cnt); if (page_cnt > 1) /* bulk zap if multiple pages being freed */ diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 79660e3fca4c..6cdbb4c33d31 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -947,22 +947,44 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map, struct file *map_file, int fd) { struct bpf_prog *prog = bpf_prog_get(fd); + bool is_extended; if (IS_ERR(prog)) return prog; - if (!bpf_prog_map_compatible(map, prog)) { + if (prog->type == BPF_PROG_TYPE_EXT || + !bpf_prog_map_compatible(map, prog)) { bpf_prog_put(prog); return ERR_PTR(-EINVAL); } + mutex_lock(&prog->aux->ext_mutex); + is_extended = prog->aux->is_extended; + if (!is_extended) + prog->aux->prog_array_member_cnt++; + mutex_unlock(&prog->aux->ext_mutex); + if (is_extended) { + /* Extended prog can not be tail callee. It's to prevent a + * potential infinite loop like: + * tail callee prog entry -> tail callee prog subprog -> + * freplace prog entry --tailcall-> tail callee prog entry. + */ + bpf_prog_put(prog); + return ERR_PTR(-EBUSY); + } + return prog; } static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { + struct bpf_prog *prog = ptr; + + mutex_lock(&prog->aux->ext_mutex); + prog->aux->prog_array_member_cnt--; + mutex_unlock(&prog->aux->ext_mutex); /* bpf_prog is freed after one RCU or tasks trace grace period */ - bpf_prog_put(ptr); + bpf_prog_put(prog); } static u32 prog_fd_array_sys_lookup_elem(void *ptr) diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c index 28efd0a3f220..20f05de92e9c 100644 --- a/kernel/bpf/bpf_cgrp_storage.c +++ b/kernel/bpf/bpf_cgrp_storage.c @@ -107,7 +107,7 @@ static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key, bpf_cgrp_storage_lock(); sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map, - value, map_flags, GFP_ATOMIC); + value, map_flags, false, GFP_ATOMIC); bpf_cgrp_storage_unlock(); cgroup_put(cgroup); return PTR_ERR_OR_ZERO(sdata); @@ -181,7 +181,7 @@ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup, if (!percpu_ref_is_dying(&cgroup->self.refcnt) && (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map, - value, BPF_NOEXIST, gfp_flags); + value, BPF_NOEXIST, false, gfp_flags); unlock: bpf_cgrp_storage_unlock(); diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c index e16e79f8cd6d..a51c82dee1bd 100644 --- a/kernel/bpf/bpf_inode_storage.c +++ b/kernel/bpf/bpf_inode_storage.c @@ -99,7 +99,7 @@ static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key, sdata = bpf_local_storage_update(file_inode(fd_file(f)), (struct bpf_local_storage_map *)map, - value, map_flags, 
GFP_ATOMIC); + value, map_flags, false, GFP_ATOMIC); return PTR_ERR_OR_ZERO(sdata); } @@ -153,7 +153,7 @@ BPF_CALL_5(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode, if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) { sdata = bpf_local_storage_update( inode, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST, gfp_flags); + BPF_NOEXIST, false, gfp_flags); return IS_ERR(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data; } diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index c938dea5ddbf..7e6a0af0afc1 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -73,7 +73,7 @@ static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem) struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, - void *value, bool charge_mem, gfp_t gfp_flags) + void *value, bool charge_mem, bool swap_uptrs, gfp_t gfp_flags) { struct bpf_local_storage_elem *selem; @@ -99,9 +99,12 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, } if (selem) { - if (value) + if (value) { + /* No need to call check_and_init_map_value as memory is zero init */ copy_map_value(&smap->map, SDATA(selem)->data, value); - /* No need to call check_and_init_map_value as memory is zero init */ + if (swap_uptrs) + bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value); + } return selem; } @@ -209,8 +212,12 @@ static void __bpf_selem_free(struct bpf_local_storage_elem *selem, static void bpf_selem_free_rcu(struct rcu_head *rcu) { struct bpf_local_storage_elem *selem; + struct bpf_local_storage_map *smap; selem = container_of(rcu, struct bpf_local_storage_elem, rcu); + /* The bpf_local_storage_map_free will wait for rcu_barrier */ + smap = rcu_dereference_check(SDATA(selem)->smap, 1); + bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); bpf_mem_cache_raw_free(selem); } @@ -226,16 +233,25 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem, struct bpf_local_storage_map *smap, bool reuse_now) { - bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); - if (!smap->bpf_ma) { + /* Only task storage has uptrs and task storage + * has moved to bpf_mem_alloc. Meaning smap->bpf_ma == true + * for task storage, so this bpf_obj_free_fields() won't unpin + * any uptr. + */ + bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); __bpf_selem_free(selem, reuse_now); return; } - if (!reuse_now) { - call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu); - } else { + if (reuse_now) { + /* reuse_now == true only happens when the storage owner + * (e.g. task_struct) is being destructed or the map itself + * is being destructed (ie map_free). In both cases, + * no bpf prog can have a hold on the selem. It is + * safe to unpin the uptrs and free the selem now. + */ + bpf_obj_free_fields(smap->map.record, SDATA(selem)->data); /* Instead of using the vanilla call_rcu(), * bpf_mem_cache_free will be able to reuse selem * immediately. @@ -243,6 +259,26 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem, migrate_disable(); bpf_mem_cache_free(&smap->selem_ma, selem); migrate_enable(); + return; + } + + call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu); +} + +static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now) +{ + struct bpf_local_storage_elem *selem; + struct bpf_local_storage_map *smap; + struct hlist_node *n; + + /* The "_safe" iteration is needed. 
+ * The loop is not removing the selem from the list + * but bpf_selem_free will use the selem->rcu_head + * which is union-ized with the selem->free_node. + */ + hlist_for_each_entry_safe(selem, n, list, free_node) { + smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held()); + bpf_selem_free(selem, smap, reuse_now); } } @@ -252,7 +288,7 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem, */ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage, struct bpf_local_storage_elem *selem, - bool uncharge_mem, bool reuse_now) + bool uncharge_mem, struct hlist_head *free_selem_list) { struct bpf_local_storage_map *smap; bool free_local_storage; @@ -296,7 +332,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor SDATA(selem)) RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL); - bpf_selem_free(selem, smap, reuse_now); + hlist_add_head(&selem->free_node, free_selem_list); if (rcu_access_pointer(local_storage->smap) == smap) RCU_INIT_POINTER(local_storage->smap, NULL); @@ -345,6 +381,7 @@ static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem, struct bpf_local_storage_map *storage_smap; struct bpf_local_storage *local_storage; bool bpf_ma, free_local_storage = false; + HLIST_HEAD(selem_free_list); unsigned long flags; if (unlikely(!selem_linked_to_storage_lockless(selem))) @@ -360,9 +397,11 @@ static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem, raw_spin_lock_irqsave(&local_storage->lock, flags); if (likely(selem_linked_to_storage(selem))) free_local_storage = bpf_selem_unlink_storage_nolock( - local_storage, selem, true, reuse_now); + local_storage, selem, true, &selem_free_list); raw_spin_unlock_irqrestore(&local_storage->lock, flags); + bpf_selem_free_list(&selem_free_list, reuse_now); + if (free_local_storage) bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now); } @@ -524,11 +563,12 @@ int bpf_local_storage_alloc(void *owner, */ struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags, gfp_t gfp_flags) + void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags) { struct bpf_local_storage_data *old_sdata = NULL; struct bpf_local_storage_elem *alloc_selem, *selem = NULL; struct bpf_local_storage *local_storage; + HLIST_HEAD(old_selem_free_list); unsigned long flags; int err; @@ -550,7 +590,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, if (err) return ERR_PTR(err); - selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); + selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags); if (!selem) return ERR_PTR(-ENOMEM); @@ -584,7 +624,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, /* A lookup has just been done before and concluded a new selem is * needed. The chance of an unnecessary alloc is unlikely. 
*/ - alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags); + alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, swap_uptrs, gfp_flags); if (!alloc_selem) return ERR_PTR(-ENOMEM); @@ -624,11 +664,12 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, if (old_sdata) { bpf_selem_unlink_map(SELEM(old_sdata)); bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata), - true, false); + true, &old_selem_free_list); } unlock: raw_spin_unlock_irqrestore(&local_storage->lock, flags); + bpf_selem_free_list(&old_selem_free_list, false); if (alloc_selem) { mem_uncharge(smap, owner, smap->elem_size); bpf_selem_free(alloc_selem, smap, true); @@ -706,6 +747,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage) struct bpf_local_storage_map *storage_smap; struct bpf_local_storage_elem *selem; bool bpf_ma, free_storage = false; + HLIST_HEAD(free_selem_list); struct hlist_node *n; unsigned long flags; @@ -734,10 +776,12 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage) * of the loop will set the free_cgroup_storage to true. */ free_storage = bpf_selem_unlink_storage_nolock( - local_storage, selem, true, true); + local_storage, selem, true, &free_selem_list); } raw_spin_unlock_irqrestore(&local_storage->lock, flags); + bpf_selem_free_list(&free_selem_list, true); + if (free_storage) bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true); } @@ -883,6 +927,9 @@ void bpf_local_storage_map_free(struct bpf_map *map, synchronize_rcu(); if (smap->bpf_ma) { + rcu_barrier_tasks_trace(); + if (!rcu_trace_implies_rcu_gp()) + rcu_barrier(); bpf_mem_alloc_destroy(&smap->selem_ma); bpf_mem_alloc_destroy(&smap->storage_ma); } diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index fda3dd2ee984..606efe32485a 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -23,7 +23,6 @@ struct bpf_struct_ops_value { struct bpf_struct_ops_map { struct bpf_map map; - struct rcu_head rcu; const struct bpf_struct_ops_desc *st_ops_desc; /* protect map_update */ struct mutex lock; @@ -32,7 +31,9 @@ struct bpf_struct_ops_map { * (in kvalue.data). */ struct bpf_link **links; - u32 links_cnt; + /* ksyms for bpf trampolines */ + struct bpf_ksym **ksyms; + u32 funcs_cnt; u32 image_pages_cnt; /* image_pages is an array of pages that has all the trampolines * that stores the func args before calling the bpf_prog. 
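[Editor's note on the bpf_local_storage changes above: the point of collecting elements on a local list is that only list manipulation happens under the raw spin lock, while the actual free - which after this series may unpin user pages backing uptr fields - runs after the lock is dropped. A simplified, self-contained sketch of that pattern (illustrative names only, not kernel code):]

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct elem {
            struct hlist_node node;      /* linked in the storage while live */
            struct hlist_node free_node; /* reused for the defer list; union'd with the RCU head in the real code */
    };

    struct storage {
            raw_spinlock_t lock;
            struct hlist_head elems;
    };

    static void storage_destroy(struct storage *s)
    {
            struct hlist_node *n;
            unsigned long flags;
            struct elem *e;
            HLIST_HEAD(free_list);

            raw_spin_lock_irqsave(&s->lock, flags);
            hlist_for_each_entry_safe(e, n, &s->elems, node) {
                    hlist_del(&e->node);
                    /* defer the free; only list manipulation under the lock */
                    hlist_add_head(&e->free_node, &free_list);
            }
            raw_spin_unlock_irqrestore(&s->lock, flags);

            /* heavier cleanup (unpinning pages, RCU-deferred frees) is safe here */
            hlist_for_each_entry_safe(e, n, &free_list, free_node)
                    kfree(e);
    }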
@@ -481,11 +482,11 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) { u32 i; - for (i = 0; i < st_map->links_cnt; i++) { - if (st_map->links[i]) { - bpf_link_put(st_map->links[i]); - st_map->links[i] = NULL; - } + for (i = 0; i < st_map->funcs_cnt; i++) { + if (!st_map->links[i]) + break; + bpf_link_put(st_map->links[i]); + st_map->links[i] = NULL; } } @@ -586,6 +587,49 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, return 0; } +static void bpf_struct_ops_ksym_init(const char *tname, const char *mname, + void *image, unsigned int size, + struct bpf_ksym *ksym) +{ + snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname); + INIT_LIST_HEAD_RCU(&ksym->lnode); + bpf_image_ksym_init(image, size, ksym); +} + +static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map) +{ + u32 i; + + for (i = 0; i < st_map->funcs_cnt; i++) { + if (!st_map->ksyms[i]) + break; + bpf_image_ksym_add(st_map->ksyms[i]); + } +} + +static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map) +{ + u32 i; + + for (i = 0; i < st_map->funcs_cnt; i++) { + if (!st_map->ksyms[i]) + break; + bpf_image_ksym_del(st_map->ksyms[i]); + } +} + +static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map) +{ + u32 i; + + for (i = 0; i < st_map->funcs_cnt; i++) { + if (!st_map->ksyms[i]) + break; + kfree(st_map->ksyms[i]); + st_map->ksyms[i] = NULL; + } +} + static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { @@ -601,6 +645,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, int prog_fd, err; u32 i, trampoline_start, image_off = 0; void *cur_image = NULL, *image = NULL; + struct bpf_link **plink; + struct bpf_ksym **pksym; + const char *tname, *mname; if (flags) return -EINVAL; @@ -639,14 +686,19 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, udata = &uvalue->data; kdata = &kvalue->data; + plink = st_map->links; + pksym = st_map->ksyms; + tname = btf_name_by_offset(st_map->btf, t->name_off); module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]); for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; struct bpf_tramp_link *link; + struct bpf_ksym *ksym; u32 moff; moff = __btf_member_bit_offset(t, member) / 8; + mname = btf_name_by_offset(st_map->btf, member->name_off); ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL); if (ptype == module_type) { if (*(void **)(udata + moff)) @@ -714,7 +766,14 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, } bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog); - st_map->links[i] = &link->link; + *plink++ = &link->link; + + ksym = kzalloc(sizeof(*ksym), GFP_USER); + if (!ksym) { + err = -ENOMEM; + goto reset_unlock; + } + *pksym++ = ksym; trampoline_start = image_off; err = bpf_struct_ops_prepare_trampoline(tlinks, link, @@ -735,6 +794,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, /* put prog_id to udata */ *(unsigned long *)(udata + moff) = prog->aux->id; + + /* init ksym for this trampoline */ + bpf_struct_ops_ksym_init(tname, mname, + image + trampoline_start, + image_off - trampoline_start, + ksym); } if (st_ops->validate) { @@ -783,6 +848,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, */ reset_unlock: + bpf_struct_ops_map_free_ksyms(st_map); bpf_struct_ops_map_free_image(st_map); 
bpf_struct_ops_map_put_progs(st_map); memset(uvalue, 0, map->value_size); @@ -790,6 +856,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, unlock: kfree(tlinks); mutex_unlock(&st_map->lock); + if (!err) + bpf_struct_ops_map_add_ksyms(st_map); return err; } @@ -849,7 +917,10 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map) if (st_map->links) bpf_struct_ops_map_put_progs(st_map); + if (st_map->ksyms) + bpf_struct_ops_map_free_ksyms(st_map); bpf_map_area_free(st_map->links); + bpf_map_area_free(st_map->ksyms); bpf_struct_ops_map_free_image(st_map); bpf_map_area_free(st_map->uvalue); bpf_map_area_free(st_map); @@ -866,6 +937,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) if (btf_is_module(st_map->btf)) module_put(st_map->st_ops_desc->st_ops->owner); + bpf_struct_ops_map_del_ksyms(st_map); + /* The struct_ops's function may switch to another struct_ops. * * For example, bpf_tcp_cc_x->init() may switch to @@ -895,6 +968,19 @@ static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) return 0; } +static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t) +{ + int i; + u32 count; + const struct btf_member *member; + + count = 0; + for_each_member(i, t, member) + if (btf_type_resolve_func_ptr(btf, member->type, NULL)) + count++; + return count; +} + static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) { const struct bpf_struct_ops_desc *st_ops_desc; @@ -961,11 +1047,15 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) map = &st_map->map; st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); - st_map->links_cnt = btf_type_vlen(t); + st_map->funcs_cnt = count_func_ptrs(btf, t); st_map->links = - bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *), + bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *), NUMA_NO_NODE); - if (!st_map->uvalue || !st_map->links) { + + st_map->ksyms = + bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *), + NUMA_NO_NODE); + if (!st_map->uvalue || !st_map->links || !st_map->ksyms) { ret = -ENOMEM; goto errout_free; } @@ -994,7 +1084,8 @@ static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map) usage = sizeof(*st_map) + vt->size - sizeof(struct bpf_struct_ops_value); usage += vt->size; - usage += btf_type_vlen(vt) * sizeof(struct bpf_links *); + usage += st_map->funcs_cnt * sizeof(struct bpf_link *); + usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *); usage += PAGE_SIZE; return usage; } diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c index 1eb9852a9f8e..bf7fa15fdcc6 100644 --- a/kernel/bpf/bpf_task_storage.c +++ b/kernel/bpf/bpf_task_storage.c @@ -128,6 +128,9 @@ static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, struct pid *pid; int fd, err; + if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR)) + return -EOPNOTSUPP; + fd = *(int *)key; pid = pidfd_get_pid(fd, &f_flags); if (IS_ERR(pid)) @@ -146,7 +149,7 @@ static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key, bpf_task_storage_lock(); sdata = bpf_local_storage_update( task, (struct bpf_local_storage_map *)map, value, map_flags, - GFP_ATOMIC); + true, GFP_ATOMIC); bpf_task_storage_unlock(); err = PTR_ERR_OR_ZERO(sdata); @@ -218,7 +221,7 @@ static void *__bpf_task_storage_get(struct bpf_map *map, (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) { sdata = bpf_local_storage_update( task, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST, 
gfp_flags); + BPF_NOEXIST, false, gfp_flags); return IS_ERR(sdata) ? NULL : sdata->data; } diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 5cd1c7a23848..e7a59e6462a9 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2808,7 +2808,7 @@ static void btf_ref_type_log(struct btf_verifier_env *env, btf_verifier_log(env, "type_id=%u", t->type); } -static struct btf_kind_operations modifier_ops = { +static const struct btf_kind_operations modifier_ops = { .check_meta = btf_ref_type_check_meta, .resolve = btf_modifier_resolve, .check_member = btf_modifier_check_member, @@ -2817,7 +2817,7 @@ static struct btf_kind_operations modifier_ops = { .show = btf_modifier_show, }; -static struct btf_kind_operations ptr_ops = { +static const struct btf_kind_operations ptr_ops = { .check_meta = btf_ref_type_check_meta, .resolve = btf_ptr_resolve, .check_member = btf_ptr_check_member, @@ -2858,7 +2858,7 @@ static void btf_fwd_type_log(struct btf_verifier_env *env, btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); } -static struct btf_kind_operations fwd_ops = { +static const struct btf_kind_operations fwd_ops = { .check_meta = btf_fwd_check_meta, .resolve = btf_df_resolve, .check_member = btf_df_check_member, @@ -3109,7 +3109,7 @@ static void btf_array_show(const struct btf *btf, const struct btf_type *t, __btf_array_show(btf, t, type_id, data, bits_offset, show); } -static struct btf_kind_operations array_ops = { +static const struct btf_kind_operations array_ops = { .check_meta = btf_array_check_meta, .resolve = btf_array_resolve, .check_member = btf_array_check_member, @@ -3334,7 +3334,7 @@ static int btf_find_struct(const struct btf *btf, const struct btf_type *t, } static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, - u32 off, int sz, struct btf_field_info *info) + u32 off, int sz, struct btf_field_info *info, u32 field_mask) { enum btf_field_type type; u32 res_id; @@ -3358,9 +3358,14 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, type = BPF_KPTR_REF; else if (!strcmp("percpu_kptr", __btf_name_by_offset(btf, t->name_off))) type = BPF_KPTR_PERCPU; + else if (!strcmp("uptr", __btf_name_by_offset(btf, t->name_off))) + type = BPF_UPTR; else return -EINVAL; + if (!(type & field_mask)) + return BTF_FIELD_IGNORE; + /* Get the base type */ t = btf_type_skip_modifiers(btf, t->type, &res_id); /* Only pointer to struct is allowed */ @@ -3502,7 +3507,7 @@ static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_ field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); /* Only return BPF_KPTR when all other types with matchable names fail */ - if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) { + if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) { type = BPF_KPTR_REF; goto end; } @@ -3535,6 +3540,7 @@ static int btf_repeat_fields(struct btf_field_info *info, int info_cnt, case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: case BPF_LIST_HEAD: case BPF_RB_ROOT: break; @@ -3667,8 +3673,9 @@ static int btf_find_field_one(const struct btf *btf, case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: ret = btf_find_kptr(btf, var_type, off, sz, - info_cnt ? &info[0] : &tmp); + info_cnt ? 
&info[0] : &tmp, field_mask); if (ret < 0) return ret; break; @@ -3991,6 +3998,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]); if (ret < 0) goto end; @@ -4050,12 +4058,28 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) * Hence we only need to ensure that bpf_{list_head,rb_root} ownership * does not form cycles. */ - if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_GRAPH_ROOT)) + if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR))) return 0; for (i = 0; i < rec->cnt; i++) { struct btf_struct_meta *meta; + const struct btf_type *t; u32 btf_id; + if (rec->fields[i].type == BPF_UPTR) { + /* The uptr only supports pinning one page and cannot + * point to a kernel struct + */ + if (btf_is_kernel(rec->fields[i].kptr.btf)) + return -EINVAL; + t = btf_type_by_id(rec->fields[i].kptr.btf, + rec->fields[i].kptr.btf_id); + if (!t->size) + return -EINVAL; + if (t->size > PAGE_SIZE) + return -E2BIG; + continue; + } + if (!(rec->fields[i].type & BPF_GRAPH_ROOT)) continue; btf_id = rec->fields[i].graph_root.value_btf_id; @@ -4191,7 +4215,7 @@ static void btf_struct_show(const struct btf *btf, const struct btf_type *t, __btf_struct_show(btf, t, type_id, data, bits_offset, show); } -static struct btf_kind_operations struct_ops = { +static const struct btf_kind_operations struct_ops = { .check_meta = btf_struct_check_meta, .resolve = btf_struct_resolve, .check_member = btf_struct_check_member, @@ -4359,7 +4383,7 @@ static void btf_enum_show(const struct btf *btf, const struct btf_type *t, btf_show_end_type(show); } -static struct btf_kind_operations enum_ops = { +static const struct btf_kind_operations enum_ops = { .check_meta = btf_enum_check_meta, .resolve = btf_df_resolve, .check_member = btf_enum_check_member, @@ -4462,7 +4486,7 @@ static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, btf_show_end_type(show); } -static struct btf_kind_operations enum64_ops = { +static const struct btf_kind_operations enum64_ops = { .check_meta = btf_enum64_check_meta, .resolve = btf_df_resolve, .check_member = btf_enum_check_member, @@ -4540,7 +4564,7 @@ static void btf_func_proto_log(struct btf_verifier_env *env, btf_verifier_log(env, ")"); } -static struct btf_kind_operations func_proto_ops = { +static const struct btf_kind_operations func_proto_ops = { .check_meta = btf_func_proto_check_meta, .resolve = btf_df_resolve, /* @@ -4598,7 +4622,7 @@ static int btf_func_resolve(struct btf_verifier_env *env, return 0; } -static struct btf_kind_operations func_ops = { +static const struct btf_kind_operations func_ops = { .check_meta = btf_func_check_meta, .resolve = btf_func_resolve, .check_member = btf_df_check_member, @@ -5566,7 +5590,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) goto free_aof; } - ret = btf_find_kptr(btf, t, 0, 0, &tmp); + ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR); if (ret != BTF_FIELD_FOUND) continue; @@ -6564,7 +6588,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, if (prog_args_trusted(prog)) info->reg_type |= PTR_TRUSTED; - if (btf_param_match_suffix(btf, &args[arg], "__nullable")) + /* Raw tracepoint arguments always get marked as maybe NULL */ + if (bpf_prog_is_raw_tp(prog)) + info->reg_type |= PTR_MAYBE_NULL; + else if (btf_param_match_suffix(btf, &args[arg], "__nullable")) info->reg_type 
|= PTR_MAYBE_NULL; if (tgt_prog) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index e303626bdb2f..a2327c4fdc8b 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -131,6 +131,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); #endif mutex_init(&fp->aux->used_maps_mutex); + mutex_init(&fp->aux->ext_mutex); mutex_init(&fp->aux->dst_mutex); return fp; @@ -3044,6 +3045,11 @@ bool __weak bpf_jit_supports_exceptions(void) return false; } +bool __weak bpf_jit_supports_private_stack(void) +{ + return false; +} + void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) { } diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c index 70fb82bf1637..b77db7413f8c 100644 --- a/kernel/bpf/dispatcher.c +++ b/kernel/bpf/dispatcher.c @@ -154,7 +154,8 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, d->image = NULL; goto out; } - bpf_image_ksym_add(d->image, PAGE_SIZE, &d->ksym); + bpf_image_ksym_init(d->image, PAGE_SIZE, &d->ksym); + bpf_image_ksym_add(&d->ksym); } prev_num_progs = d->num_progs; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b14b87463ee0..3ec941a0ea41 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -896,9 +896,12 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) { check_and_free_fields(htab, l); + + migrate_disable(); if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); bpf_mem_cache_free(&htab->ma, l); + migrate_enable(); } static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) @@ -948,7 +951,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) if (htab_is_prealloc(htab)) { bpf_map_dec_elem_count(&htab->map); check_and_free_fields(htab, l); - __pcpu_freelist_push(&htab->freelist, &l->fnode); + pcpu_freelist_push(&htab->freelist, &l->fnode); } else { dec_elem_count(htab); htab_elem_free(htab, l); @@ -1018,7 +1021,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; - htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; @@ -1105,6 +1107,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, struct htab_elem *l_new = NULL, *l_old; struct hlist_nulls_head *head; unsigned long flags; + void *old_map_ptr; struct bucket *b; u32 key_size, hash; int ret; @@ -1183,12 +1186,27 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, hlist_nulls_add_head_rcu(&l_new->hash_node, head); if (l_old) { hlist_nulls_del_rcu(&l_old->hash_node); + + /* l_old has already been stashed in htab->extra_elems, free + * its special fields before it is available for reuse. Also + * save the old map pointer in htab of maps before unlock + * and release it after unlock. 
+ */ + old_map_ptr = NULL; + if (htab_is_prealloc(htab)) { + if (map->ops->map_fd_put_ptr) + old_map_ptr = fd_htab_map_get_ptr(map, l_old); + check_and_free_fields(htab, l_old); + } + } + htab_unlock_bucket(htab, b, hash, flags); + if (l_old) { + if (old_map_ptr) + map->ops->map_fd_put_ptr(map, old_map_ptr, true); if (!htab_is_prealloc(htab)) free_htab_elem(htab, l_old); - else - check_and_free_fields(htab, l_old); } - ret = 0; + return 0; err: htab_unlock_bucket(htab, b, hash, flags); return ret; @@ -1432,15 +1450,15 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key) return ret; l = lookup_elem_raw(head, hash, key, key_size); - - if (l) { + if (l) hlist_nulls_del_rcu(&l->hash_node); - free_htab_elem(htab, l); - } else { + else ret = -ENOENT; - } htab_unlock_bucket(htab, b, hash, flags); + + if (l) + free_htab_elem(htab, l); return ret; } @@ -1853,13 +1871,14 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, * may cause deadlock. See comments in function * prealloc_lru_pop(). Let us do bpf_lru_push_free() * after releasing the bucket lock. + * + * For htab of maps, htab_put_fd_value() in + * free_htab_elem() may acquire a spinlock with bucket + * lock being held and it violates the lock rule, so + * invoke free_htab_elem() after unlock as well. */ - if (is_lru_map) { - l->batch_flink = node_to_free; - node_to_free = l; - } else { - free_htab_elem(htab, l); - } + l->batch_flink = node_to_free; + node_to_free = l; } dst_key += key_size; dst_val += value_size; @@ -1871,7 +1890,10 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, while (node_to_free) { l = node_to_free; node_to_free = node_to_free->batch_flink; - htab_lru_push_free(htab, l); + if (is_lru_map) + htab_lru_push_free(htab, l); + else + free_htab_elem(htab, l); } next_batch: diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 3d45ebe8afb4..751c150f9e1c 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2521,6 +2521,25 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) return p; } +/** + * bpf_task_from_vpid - Find a struct task_struct from its vpid by looking it up + * in the pid namespace of the current task. If a task is returned, it must + * either be stored in a map, or released with bpf_task_release(). + * @vpid: The vpid of the task being looked up. + */ +__bpf_kfunc struct task_struct *bpf_task_from_vpid(s32 vpid) +{ + struct task_struct *p; + + rcu_read_lock(); + p = find_task_by_vpid(vpid); + if (p) + p = bpf_task_acquire(p); + rcu_read_unlock(); + + return p; +} + /** * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 
* @p: The dynptr whose data slice to retrieve @@ -3068,7 +3087,9 @@ BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) #endif BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_throw) +BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS) BTF_KFUNCS_END(generic_btf_ids) static const struct btf_kfunc_id_set generic_kfunc_set = { @@ -3086,8 +3107,8 @@ BTF_ID(func, bpf_cgroup_release_dtor) #endif BTF_KFUNCS_START(common_btf_ids) -BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) -BTF_ID_FLAGS(func, bpf_rdonly_cast) +BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_FASTCALL) +BTF_ID_FLAGS(func, bpf_rdonly_cast, KF_FASTCALL) BTF_ID_FLAGS(func, bpf_rcu_read_lock) BTF_ID_FLAGS(func, bpf_rcu_read_unlock) BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) @@ -3124,6 +3145,10 @@ BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_get_kmem_cache) +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c new file mode 100644 index 000000000000..3ae2158d767f --- /dev/null +++ b/kernel/bpf/kmem_cache_iter.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Google */ +#include +#include +#include +#include +#include + +#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */ + +/* open-coded version */ +struct bpf_iter_kmem_cache { + __u64 __opaque[1]; +} __attribute__((aligned(8))); + +struct bpf_iter_kmem_cache_kern { + struct kmem_cache *pos; +} __attribute__((aligned(8))); + +#define KMEM_CACHE_POS_START ((void *)1L) + +__bpf_kfunc_start_defs(); + +__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) +{ + struct bpf_iter_kmem_cache_kern *kit = (void *)it; + + BUILD_BUG_ON(sizeof(*kit) > sizeof(*it)); + BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it)); + + kit->pos = KMEM_CACHE_POS_START; + return 0; +} + +__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) +{ + struct bpf_iter_kmem_cache_kern *kit = (void *)it; + struct kmem_cache *prev = kit->pos; + struct kmem_cache *next; + bool destroy = false; + + if (!prev) + return NULL; + + mutex_lock(&slab_mutex); + + if (list_empty(&slab_caches)) { + mutex_unlock(&slab_mutex); + return NULL; + } + + if (prev == KMEM_CACHE_POS_START) + next = list_first_entry(&slab_caches, struct kmem_cache, list); + else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev) + next = NULL; + else + next = list_next_entry(prev, list); + + /* boot_caches have negative refcount, don't touch them */ + if (next && next->refcount > 0) + next->refcount++; + + /* Skip kmem_cache_destroy() for active entries */ + if (prev && prev != KMEM_CACHE_POS_START) { + if (prev->refcount > 1) + prev->refcount--; + else if (prev->refcount == 1) + destroy = true; + } + + mutex_unlock(&slab_mutex); + + if (destroy) + kmem_cache_destroy(prev); + + kit->pos = next; + return 
next; +} + +__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) +{ + struct bpf_iter_kmem_cache_kern *kit = (void *)it; + struct kmem_cache *s = kit->pos; + bool destroy = false; + + if (s == NULL || s == KMEM_CACHE_POS_START) + return; + + mutex_lock(&slab_mutex); + + /* Skip kmem_cache_destroy() for active entries */ + if (s->refcount > 1) + s->refcount--; + else if (s->refcount == 1) + destroy = true; + + mutex_unlock(&slab_mutex); + + if (destroy) + kmem_cache_destroy(s); +} + +__bpf_kfunc_end_defs(); + +struct bpf_iter__kmem_cache { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct kmem_cache *, s); +}; + +union kmem_cache_iter_priv { + struct bpf_iter_kmem_cache it; + struct bpf_iter_kmem_cache_kern kit; +}; + +static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos) +{ + loff_t cnt = 0; + bool found = false; + struct kmem_cache *s; + union kmem_cache_iter_priv *p = seq->private; + + mutex_lock(&slab_mutex); + + /* Find an entry at the given position in the slab_caches list instead + * of keeping a reference (of the last visited entry, if any) out of + * slab_mutex. It might miss something if one is deleted in the middle + * while it releases the lock. But it should be rare and there's not + * much we can do about it. + */ + list_for_each_entry(s, &slab_caches, list) { + if (cnt == *pos) { + /* Make sure this entry remains in the list by getting + * a new reference count. Note that boot_cache entries + * have a negative refcount, so don't touch them. + */ + if (s->refcount > 0) + s->refcount++; + found = true; + break; + } + cnt++; + } + mutex_unlock(&slab_mutex); + + if (!found) + s = NULL; + + p->kit.pos = s; + return s; +} + +static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_iter__kmem_cache ctx = { + .meta = &meta, + .s = v, + }; + union kmem_cache_iter_priv *p = seq->private; + struct bpf_prog *prog; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog && !ctx.s) + bpf_iter_run_prog(prog, &ctx); + + bpf_iter_kmem_cache_destroy(&p->it); +} + +static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + union kmem_cache_iter_priv *p = seq->private; + + ++*pos; + + return bpf_iter_kmem_cache_next(&p->it); +} + +static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_iter__kmem_cache ctx = { + .meta = &meta, + .s = v, + }; + struct bpf_prog *prog; + int ret = 0; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + if (prog) + ret = bpf_iter_run_prog(prog, &ctx); + + return ret; +} + +static const struct seq_operations kmem_cache_iter_seq_ops = { + .start = kmem_cache_iter_seq_start, + .next = kmem_cache_iter_seq_next, + .stop = kmem_cache_iter_seq_stop, + .show = kmem_cache_iter_seq_show, +}; + +BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache) + +static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = { + .seq_ops = &kmem_cache_iter_seq_ops, + .seq_priv_size = sizeof(union kmem_cache_iter_priv), +}; + +static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux, + struct seq_file *seq) +{ + seq_puts(seq, "kmem_cache iter\n"); +} + +DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta, + struct kmem_cache *s) + +static struct bpf_iter_reg bpf_kmem_cache_reg_info = { + .target = "kmem_cache", + .feature = BPF_ITER_RESCHED, + .show_fdinfo = bpf_iter_kmem_cache_show_fdinfo, + .ctx_arg_info_size = 1, + 
.ctx_arg_info = { + { offsetof(struct bpf_iter__kmem_cache, s), + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, + }, + .seq_info = &kmem_cache_iter_seq_info, +}; + +static int __init bpf_kmem_cache_iter_init(void) +{ + bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0]; + return bpf_iter_reg_target(&bpf_kmem_cache_reg_info); +} + +late_initcall(bpf_kmem_cache_iter_init); diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 146f5b57cfb1..889374722d0a 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -254,11 +254,8 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic) static void free_one(void *obj, bool percpu) { - if (percpu) { + if (percpu) free_percpu(((void __percpu **)obj)[1]); - kfree(obj); - return; - } kfree(obj); } diff --git a/kernel/bpf/range_tree.c b/kernel/bpf/range_tree.c new file mode 100644 index 000000000000..5bdf9aadca3a --- /dev/null +++ b/kernel/bpf/range_tree.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include +#include +#include "range_tree.h" + +/* + * struct range_tree is a data structure used to allocate contiguous memory + * ranges in bpf arena. It's a large bitmap. The contiguous sequence of bits is + * represented by struct range_node or 'rn' for short. + * rn->rn_rbnode links it into an interval tree while + * rn->rb_range_size links it into a second rbtree sorted by size of the range. + * __find_range() performs binary search and best fit algorithm to find the + * range less or equal requested size. + * range_tree_clear/set() clears or sets a range of bits in this bitmap. The + * adjacent ranges are merged or split at the same time. + * + * The split/merge logic is based/borrowed from XFS's xbitmap32 added + * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree"). + * + * The implementation relies on external lock to protect rbtree-s. + * The alloc/free of range_node-s is done via bpf_mem_alloc. + * + * bpf arena is using range_tree to represent unallocated slots. + * At init time: + * range_tree_set(rt, 0, max); + * Then: + * start = range_tree_find(rt, len); + * if (start >= 0) + * range_tree_clear(rt, start, len); + * to find free range and mark slots as allocated and later: + * range_tree_set(rt, start, len); + * to mark as unallocated after use. 
+ */ +struct range_node { + struct rb_node rn_rbnode; + struct rb_node rb_range_size; + u32 rn_start; + u32 rn_last; /* inclusive */ + u32 __rn_subtree_last; +}; + +static struct range_node *rb_to_range_node(struct rb_node *rb) +{ + return rb_entry(rb, struct range_node, rb_range_size); +} + +static u32 rn_size(struct range_node *rn) +{ + return rn->rn_last - rn->rn_start + 1; +} + +/* Find range that fits best to requested size */ +static inline struct range_node *__find_range(struct range_tree *rt, u32 len) +{ + struct rb_node *rb = rt->range_size_root.rb_root.rb_node; + struct range_node *best = NULL; + + while (rb) { + struct range_node *rn = rb_to_range_node(rb); + + if (len <= rn_size(rn)) { + best = rn; + rb = rb->rb_right; + } else { + rb = rb->rb_left; + } + } + + return best; +} + +s64 range_tree_find(struct range_tree *rt, u32 len) +{ + struct range_node *rn; + + rn = __find_range(rt, len); + if (!rn) + return -ENOENT; + return rn->rn_start; +} + +/* Insert the range into rbtree sorted by the range size */ +static inline void __range_size_insert(struct range_node *rn, + struct rb_root_cached *root) +{ + struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; + u64 size = rn_size(rn); + bool leftmost = true; + + while (*link) { + rb = *link; + if (size > rn_size(rb_to_range_node(rb))) { + link = &rb->rb_left; + } else { + link = &rb->rb_right; + leftmost = false; + } + } + + rb_link_node(&rn->rb_range_size, rb, link); + rb_insert_color_cached(&rn->rb_range_size, root, leftmost); +} + +#define START(node) ((node)->rn_start) +#define LAST(node) ((node)->rn_last) + +INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32, + __rn_subtree_last, START, LAST, + static inline __maybe_unused, + __range_it) + +static inline __maybe_unused void +range_it_insert(struct range_node *rn, struct range_tree *rt) +{ + __range_size_insert(rn, &rt->range_size_root); + __range_it_insert(rn, &rt->it_root); +} + +static inline __maybe_unused void +range_it_remove(struct range_node *rn, struct range_tree *rt) +{ + rb_erase_cached(&rn->rb_range_size, &rt->range_size_root); + RB_CLEAR_NODE(&rn->rb_range_size); + __range_it_remove(rn, &rt->it_root); +} + +static inline __maybe_unused struct range_node * +range_it_iter_first(struct range_tree *rt, u32 start, u32 last) +{ + return __range_it_iter_first(&rt->it_root, start, last); +} + +/* Clear the range in this range tree */ +int range_tree_clear(struct range_tree *rt, u32 start, u32 len) +{ + u32 last = start + len - 1; + struct range_node *new_rn; + struct range_node *rn; + + while ((rn = range_it_iter_first(rt, start, last))) { + if (rn->rn_start < start && rn->rn_last > last) { + u32 old_last = rn->rn_last; + + /* Overlaps with the entire clearing range */ + range_it_remove(rn, rt); + rn->rn_last = start - 1; + range_it_insert(rn, rt); + + /* Add a range */ + migrate_disable(); + new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node)); + migrate_enable(); + if (!new_rn) + return -ENOMEM; + new_rn->rn_start = last + 1; + new_rn->rn_last = old_last; + range_it_insert(new_rn, rt); + } else if (rn->rn_start < start) { + /* Overlaps with the left side of the clearing range */ + range_it_remove(rn, rt); + rn->rn_last = start - 1; + range_it_insert(rn, rt); + } else if (rn->rn_last > last) { + /* Overlaps with the right side of the clearing range */ + range_it_remove(rn, rt); + rn->rn_start = last + 1; + range_it_insert(rn, rt); + break; + } else { + /* in the middle of the clearing range */ + range_it_remove(rn, rt); + migrate_disable(); + 
bpf_mem_free(&bpf_global_ma, rn); + migrate_enable(); + } + } + return 0; +} + +/* Is the whole range set ? */ +int is_range_tree_set(struct range_tree *rt, u32 start, u32 len) +{ + u32 last = start + len - 1; + struct range_node *left; + + /* Is this whole range set ? */ + left = range_it_iter_first(rt, start, last); + if (left && left->rn_start <= start && left->rn_last >= last) + return 0; + return -ESRCH; +} + +/* Set the range in this range tree */ +int range_tree_set(struct range_tree *rt, u32 start, u32 len) +{ + u32 last = start + len - 1; + struct range_node *right; + struct range_node *left; + int err; + + /* Is this whole range already set ? */ + left = range_it_iter_first(rt, start, last); + if (left && left->rn_start <= start && left->rn_last >= last) + return 0; + + /* Clear out everything in the range we want to set. */ + err = range_tree_clear(rt, start, len); + if (err) + return err; + + /* Do we have a left-adjacent range ? */ + left = range_it_iter_first(rt, start - 1, start - 1); + if (left && left->rn_last + 1 != start) + return -EFAULT; + + /* Do we have a right-adjacent range ? */ + right = range_it_iter_first(rt, last + 1, last + 1); + if (right && right->rn_start != last + 1) + return -EFAULT; + + if (left && right) { + /* Combine left and right adjacent ranges */ + range_it_remove(left, rt); + range_it_remove(right, rt); + left->rn_last = right->rn_last; + range_it_insert(left, rt); + migrate_disable(); + bpf_mem_free(&bpf_global_ma, right); + migrate_enable(); + } else if (left) { + /* Combine with the left range */ + range_it_remove(left, rt); + left->rn_last = last; + range_it_insert(left, rt); + } else if (right) { + /* Combine with the right range */ + range_it_remove(right, rt); + right->rn_start = start; + range_it_insert(right, rt); + } else { + migrate_disable(); + left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node)); + migrate_enable(); + if (!left) + return -ENOMEM; + left->rn_start = start; + left->rn_last = last; + range_it_insert(left, rt); + } + return 0; +} + +void range_tree_destroy(struct range_tree *rt) +{ + struct range_node *rn; + + while ((rn = range_it_iter_first(rt, 0, -1U))) { + range_it_remove(rn, rt); + migrate_disable(); + bpf_mem_free(&bpf_global_ma, rn); + migrate_enable(); + } +} + +void range_tree_init(struct range_tree *rt) +{ + rt->it_root = RB_ROOT_CACHED; + rt->range_size_root = RB_ROOT_CACHED; +} diff --git a/kernel/bpf/range_tree.h b/kernel/bpf/range_tree.h new file mode 100644 index 000000000000..ff0b9110eb71 --- /dev/null +++ b/kernel/bpf/range_tree.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#ifndef _RANGE_TREE_H +#define _RANGE_TREE_H 1 + +struct range_tree { + /* root of interval tree */ + struct rb_root_cached it_root; + /* root of rbtree of interval sizes */ + struct rb_root_cached range_size_root; +}; + +void range_tree_init(struct range_tree *rt); +void range_tree_destroy(struct range_tree *rt); + +int range_tree_clear(struct range_tree *rt, u32 start, u32 len); +int range_tree_set(struct range_tree *rt, u32 start, u32 len); +int is_range_tree_set(struct range_tree *rt, u32 start, u32 len); +s64 range_tree_find(struct range_tree *rt, u32 len); + +#endif diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c5aa127ed4cc..58190ca724a2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -155,6 +155,89 @@ static void maybe_wait_bpf_programs(struct bpf_map *map) synchronize_rcu(); } +static void unpin_uptr_kaddr(void *kaddr) +{ + if (kaddr) + unpin_user_page(virt_to_page(kaddr)); +} + +static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj) +{ + const struct btf_field *field; + void **uptr_addr; + int i; + + for (i = 0, field = rec->fields; i < cnt; i++, field++) { + if (field->type != BPF_UPTR) + continue; + + uptr_addr = obj + field->offset; + unpin_uptr_kaddr(*uptr_addr); + } +} + +static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj) +{ + if (!btf_record_has_field(rec, BPF_UPTR)) + return; + + __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); +} + +static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj) +{ + const struct btf_field *field; + const struct btf_type *t; + unsigned long start, end; + struct page *page; + void **uptr_addr; + int i, err; + + if (!btf_record_has_field(rec, BPF_UPTR)) + return 0; + + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { + if (field->type != BPF_UPTR) + continue; + + uptr_addr = obj + field->offset; + start = *(unsigned long *)uptr_addr; + if (!start) + continue; + + t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); + /* t->size was checked for zero before */ + if (check_add_overflow(start, t->size - 1, &end)) { + err = -EFAULT; + goto unpin_all; + } + + /* The uptr's struct cannot span across two pages */ + if ((start & PAGE_MASK) != (end & PAGE_MASK)) { + err = -EOPNOTSUPP; + goto unpin_all; + } + + err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page); + if (err != 1) + goto unpin_all; + + if (PageHighMem(page)) { + err = -EOPNOTSUPP; + unpin_user_page(page); + goto unpin_all; + } + + *uptr_addr = page_address(page) + offset_in_page(start); + } + + return 0; + +unpin_all: + __bpf_obj_unpin_uptrs(rec, i, obj); + return err; +} + static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, void *key, void *value, __u64 flags) { @@ -199,9 +282,14 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { err = map->ops->map_push_elem(map, value, flags); } else { - rcu_read_lock(); - err = map->ops->map_update_elem(map, key, value, flags); - rcu_read_unlock(); + err = bpf_obj_pin_uptrs(map->record, value); + if (!err) { + rcu_read_lock(); + err = map->ops->map_update_elem(map, key, value, flags); + rcu_read_unlock(); + if (err) + bpf_obj_unpin_uptrs(map->record, value); + } } bpf_enable_instrumentation(); @@ -548,6 +636,7 @@ void btf_record_free(struct btf_record *rec) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: if (rec->fields[i].kptr.module) module_put(rec->fields[i].kptr.module); if (btf_is_kernel(rec->fields[i].kptr.btf)) 
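The pinning path above is driven from user space: the caller places an ordinary user-space pointer into the map value and bpf_map_update_value() pins the backing page via bpf_obj_pin_uptrs() before the map update runs. Below is a minimal user-space sketch, assuming a task storage map keyed by a pidfd as in bpf_pid_task_storage_update_elem(); the struct and function names are illustrative, only bpf_map_update_elem() and the pidfd key come from the code above.

    /* Hedged user-space sketch of a BPF_UPTR update. The map and struct
     * layouts are assumptions mirroring a task storage value that embeds a
     * "uptr"-tagged pointer; only the update call and pidfd key are taken
     * from the patch above.
     */
    #include <bpf/bpf.h>

    struct user_data {                 /* checked to fit in one page */
            int a;
            int b;
    };

    struct value_type {
            struct user_data *udata;   /* becomes a BPF_UPTR field via BTF tag */
    };

    static struct user_data udata;     /* the page backing this object is pinned */

    static int store_uptr(int map_fd, int task_pidfd)
    {
            struct value_type value = { .udata = &udata };

            /* bpf_obj_pin_uptrs() pins the page on success; BPF_F_LOCK is
             * rejected for maps that carry BPF_UPTR fields, and the pointed-to
             * object must not span two pages.
             */
            return bpf_map_update_elem(map_fd, &task_pidfd, &value, 0);
    }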
@@ -597,6 +686,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: if (btf_is_kernel(fields[i].kptr.btf)) btf_get(fields[i].kptr.btf); if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) { @@ -714,6 +804,10 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) field->kptr.dtor(xchgd_field); } break; + case BPF_UPTR: + /* The caller ensured that no one is using the uptr */ + unpin_uptr_kaddr(*(void **)field_ptr); + break; case BPF_LIST_HEAD: if (WARN_ON_ONCE(rec->spin_lock_off < 0)) continue; @@ -1105,7 +1199,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token, map->record = btf_parse_fields(btf, value_type, BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | - BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE, + BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR, map->value_size); if (!IS_ERR_OR_NULL(map->record)) { int i; @@ -1161,6 +1255,12 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token, goto free_map_tab; } break; + case BPF_UPTR: + if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { + ret = -EOPNOTSUPP; + goto free_map_tab; + } + break; case BPF_LIST_HEAD: case BPF_RB_ROOT: if (map->map_type != BPF_MAP_TYPE_HASH && @@ -3218,7 +3318,8 @@ static void bpf_tracing_link_release(struct bpf_link *link) container_of(link, struct bpf_tracing_link, link.link); WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, - tr_link->trampoline)); + tr_link->trampoline, + tr_link->tgt_prog)); bpf_trampoline_put(tr_link->trampoline); @@ -3358,7 +3459,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, * in prog->aux * * - if prog->aux->dst_trampoline is NULL, the program has already been - * attached to a target and its initial target was cleared (below) + * attached to a target and its initial target was cleared (below) * * - if tgt_prog != NULL, the caller specified tgt_prog_fd + * target_btf_id using the link_create API. 
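Returning to the uptr changes above: map_check_btf() only accepts BPF_UPTR fields in task storage maps, and the field itself is declared through the "uptr" BTF type tag recognized by btf_find_kptr(). A hedged BPF-side sketch follows, assuming a __uptr convenience macro that expands to that type tag; section, map and struct names are illustrative.

    /* Hedged BPF program sketch: a task storage value carrying a
     * "uptr"-tagged pointer. Loads of the field are marked
     * PTR_TO_MEM | PTR_MAYBE_NULL by mark_uptr_ld_reg(), so the program
     * must NULL-check before dereferencing; stores to the field itself
     * are rejected by the verifier.
     */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>

    #define __uptr __attribute__((btf_type_tag("uptr")))

    struct user_data {
            int a;
            int b;
    };

    struct value_type {
            struct user_data __uptr *udata;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_TASK_STORAGE);  /* only map type allowed for BPF_UPTR */
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __type(key, int);
            __type(value, struct value_type);
    } datamap SEC(".maps");

    char _license[] SEC("license") = "GPL";

    SEC("tp_btf/sys_enter")
    int count_enter(__u64 *ctx)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            struct value_type *v;

            v = bpf_task_storage_get(&datamap, task, 0, 0);
            if (!v || !v->udata)        /* the uptr load may be NULL */
                    return 0;

            v->udata->a += 1;           /* direct access to the pinned user page */
            return 0;
    }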
@@ -3433,7 +3534,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog, if (err) goto out_unlock; - err = bpf_trampoline_link_prog(&link->link, tr); + err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog); if (err) { bpf_link_cleanup(&link_primer); link = NULL; @@ -4002,10 +4103,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && attach_type != BPF_TRACE_UPROBE_MULTI) return -EINVAL; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && + attach_type != BPF_TRACE_UPROBE_SESSION) + return -EINVAL; if (attach_type != BPF_PERF_EVENT && attach_type != BPF_TRACE_KPROBE_MULTI && attach_type != BPF_TRACE_KPROBE_SESSION && - attach_type != BPF_TRACE_UPROBE_MULTI) + attach_type != BPF_TRACE_UPROBE_MULTI && + attach_type != BPF_TRACE_UPROBE_SESSION) return -EINVAL; return 0; case BPF_PROG_TYPE_SCHED_CLS: @@ -5258,7 +5363,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) ret = bpf_kprobe_multi_link_attach(attr, prog); - else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) + else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || + attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) ret = bpf_uprobe_multi_link_attach(attr, prog); break; default: diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index f8302a5ca400..c4b1a98ff726 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -115,10 +115,14 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog) (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC); } -void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym) +void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym) { ksym->start = (unsigned long) data; ksym->end = ksym->start + size; +} + +void bpf_image_ksym_add(struct bpf_ksym *ksym) +{ bpf_ksym_add(ksym); perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start, PAGE_SIZE, false, ksym->name); @@ -377,7 +381,8 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size) ksym = &im->ksym; INIT_LIST_HEAD_RCU(&ksym->lnode); snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key); - bpf_image_ksym_add(image, size, ksym); + bpf_image_ksym_init(image, size, ksym); + bpf_image_ksym_add(ksym); return im; out_free_image: @@ -523,7 +528,27 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog) } } -static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) +static int bpf_freplace_check_tgt_prog(struct bpf_prog *tgt_prog) +{ + struct bpf_prog_aux *aux = tgt_prog->aux; + + guard(mutex)(&aux->ext_mutex); + if (aux->prog_array_member_cnt) + /* Program extensions can not extend target prog when the target + * prog has been updated to any prog_array map as tail callee. + * It's to prevent a potential infinite loop like: + * tgt prog entry -> tgt prog subprog -> freplace prog entry + * --tailcall-> tgt prog entry. 
+ */ + return -EBUSY; + + aux->is_extended = true; + return 0; +} + +static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { enum bpf_tramp_prog_type kind; struct bpf_tramp_link *link_exiting; @@ -544,6 +569,9 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr /* Cannot attach extension if fentry/fexit are in use. */ if (cnt) return -EBUSY; + err = bpf_freplace_check_tgt_prog(tgt_prog); + if (err) + return err; tr->extension_prog = link->link.prog; return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL, link->link.prog->bpf_func); @@ -570,17 +598,21 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr return err; } -int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { int err; mutex_lock(&tr->mutex); - err = __bpf_trampoline_link_prog(link, tr); + err = __bpf_trampoline_link_prog(link, tr, tgt_prog); mutex_unlock(&tr->mutex); return err; } -static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) +static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { enum bpf_tramp_prog_type kind; int err; @@ -591,6 +623,8 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_ err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, tr->extension_prog->bpf_func, NULL); tr->extension_prog = NULL; + guard(mutex)(&tgt_prog->aux->ext_mutex); + tgt_prog->aux->is_extended = false; return err; } hlist_del_init(&link->tramp_hlist); @@ -599,12 +633,14 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_ } /* bpf_trampoline_unlink_prog() should never fail. 
*/ -int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr) +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { int err; mutex_lock(&tr->mutex); - err = __bpf_trampoline_unlink_prog(link, tr); + err = __bpf_trampoline_unlink_prog(link, tr, tgt_prog); mutex_unlock(&tr->mutex); return err; } @@ -619,7 +655,7 @@ static void bpf_shim_tramp_link_release(struct bpf_link *link) if (!shim_link->trampoline) return; - WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline)); + WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL)); bpf_trampoline_put(shim_link->trampoline); } @@ -733,7 +769,7 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, goto err; } - err = __bpf_trampoline_link_prog(&shim_link->link, tr); + err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL); if (err) goto err; @@ -868,6 +904,8 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { bpf_prog_inc_misses_counter(prog); + if (prog->aux->recursion_detected) + prog->aux->recursion_detected(prog); return 0; } return bpf_prog_start_time(); @@ -944,6 +982,8 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog, if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { bpf_prog_inc_misses_counter(prog); + if (prog->aux->recursion_detected) + prog->aux->recursion_detected(prog); return 0; } return bpf_prog_start_time(); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bb99bada7e2e..1c4ebb326785 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -194,6 +194,8 @@ struct bpf_verifier_stack_elem { #define BPF_GLOBAL_PERCPU_MA_MAX_SIZE 512 +#define BPF_PRIV_STACK_MIN_SIZE 64 + static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx); static int release_reference(struct bpf_verifier_env *env, int ref_obj_id); static void invalidate_non_owning_refs(struct bpf_verifier_env *env); @@ -418,6 +420,25 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg) return rec; } +static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { + return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) && + bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id; +} + +static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) +{ + if (!mask_raw_tp_reg_cond(env, reg)) + return false; + reg->type &= ~PTR_MAYBE_NULL; + return true; +} + +static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result) +{ + if (result) + reg->type |= PTR_MAYBE_NULL; +} + static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog) { struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; @@ -1265,6 +1286,7 @@ static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_fun if (!dst->refs) return -ENOMEM; + dst->active_locks = src->active_locks; dst->acquired_refs = src->acquired_refs; return 0; } @@ -1335,13 +1357,32 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) if (err) return err; id = ++env->id_gen; + state->refs[new_ofs].type = REF_TYPE_PTR; state->refs[new_ofs].id = id; state->refs[new_ofs].insn_idx = insn_idx; - state->refs[new_ofs].callback_ref = state->in_callback_fn ? 
state->frameno : 0; return id; } +static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type, + int id, void *ptr) +{ + struct bpf_func_state *state = cur_func(env); + int new_ofs = state->acquired_refs; + int err; + + err = resize_reference_state(state, state->acquired_refs + 1); + if (err) + return err; + state->refs[new_ofs].type = type; + state->refs[new_ofs].id = id; + state->refs[new_ofs].insn_idx = insn_idx; + state->refs[new_ofs].ptr = ptr; + + state->active_locks++; + return 0; +} + /* release function corresponding to acquire_reference_state(). Idempotent. */ static int release_reference_state(struct bpf_func_state *state, int ptr_id) { @@ -1349,10 +1390,9 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id) last_idx = state->acquired_refs - 1; for (i = 0; i < state->acquired_refs; i++) { + if (state->refs[i].type != REF_TYPE_PTR) + continue; if (state->refs[i].id == ptr_id) { - /* Cannot release caller references in callbacks */ - if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) - return -EINVAL; if (last_idx && i != last_idx) memcpy(&state->refs[i], &state->refs[last_idx], sizeof(*state->refs)); @@ -1364,6 +1404,45 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id) return -EINVAL; } +static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr) +{ + int i, last_idx; + + last_idx = state->acquired_refs - 1; + for (i = 0; i < state->acquired_refs; i++) { + if (state->refs[i].type != type) + continue; + if (state->refs[i].id == id && state->refs[i].ptr == ptr) { + if (last_idx && i != last_idx) + memcpy(&state->refs[i], &state->refs[last_idx], + sizeof(*state->refs)); + memset(&state->refs[last_idx], 0, sizeof(*state->refs)); + state->acquired_refs--; + state->active_locks--; + return 0; + } + } + return -EINVAL; +} + +static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type, + int id, void *ptr) +{ + struct bpf_func_state *state = cur_func(env); + int i; + + for (i = 0; i < state->acquired_refs; i++) { + struct bpf_reference_state *s = &state->refs[i]; + + if (s->type == REF_TYPE_PTR || s->type != type) + continue; + + if (s->id == id && s->ptr == ptr) + return s; + } + return NULL; +} + static void free_func_state(struct bpf_func_state *state) { if (!state) @@ -1373,13 +1452,6 @@ static void free_func_state(struct bpf_func_state *state) kfree(state); } -static void clear_jmp_history(struct bpf_verifier_state *state) -{ - kfree(state->jmp_history); - state->jmp_history = NULL; - state->jmp_history_cnt = 0; -} - static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { @@ -1389,7 +1461,6 @@ static void free_verifier_state(struct bpf_verifier_state *state, free_func_state(state->frame[i]); state->frame[i] = NULL; } - clear_jmp_history(state); if (free_self) kfree(state); } @@ -1415,13 +1486,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, struct bpf_func_state *dst; int i, err; - dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, - src->jmp_history_cnt, sizeof(*dst_state->jmp_history), - GFP_USER); - if (!dst_state->jmp_history) - return -ENOMEM; - dst_state->jmp_history_cnt = src->jmp_history_cnt; - /* if dst has more stack frames then src frame, free them, this is also * necessary in case of exceptional exits using bpf_throw. 
*/ @@ -1434,12 +1498,12 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->active_preempt_lock = src->active_preempt_lock; dst_state->in_sleepable = src->in_sleepable; dst_state->curframe = src->curframe; - dst_state->active_lock.ptr = src->active_lock.ptr; - dst_state->active_lock.id = src->active_lock.id; dst_state->branches = src->branches; dst_state->parent = src->parent; dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; + dst_state->insn_hist_start = src->insn_hist_start; + dst_state->insn_hist_end = src->insn_hist_end; dst_state->dfs_depth = src->dfs_depth; dst_state->callback_unroll_depth = src->callback_unroll_depth; dst_state->used_as_loop_entry = src->used_as_loop_entry; @@ -2492,9 +2556,14 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, * The caller state doesn't matter. * This is async callback. It starts in a fresh stack. * Initialize it similar to do_check_common(). + * But we do need to make sure to not clobber insn_hist, so we keep + * chaining insn_hist_start/insn_hist_end indices as for a normal + * child state. */ elem->st.branches = 1; elem->st.in_sleepable = is_sleepable; + elem->st.insn_hist_start = env->cur_state->insn_hist_end; + elem->st.insn_hist_end = elem->st.insn_hist_start; frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) goto err; @@ -3474,11 +3543,10 @@ static void linked_regs_unpack(u64 val, struct linked_regs *s) } /* for any branch, call, exit record the history of jmps in the given state */ -static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, - int insn_flags, u64 linked_regs) +static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, + int insn_flags, u64 linked_regs) { - u32 cnt = cur->jmp_history_cnt; - struct bpf_jmp_history_entry *p; + struct bpf_insn_hist_entry *p; size_t alloc_size; /* combine instruction flags if we already recorded this instruction */ @@ -3498,29 +3566,32 @@ static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_st return 0; } - cnt++; - alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p))); - p = krealloc(cur->jmp_history, alloc_size, GFP_USER); - if (!p) - return -ENOMEM; - cur->jmp_history = p; + if (cur->insn_hist_end + 1 > env->insn_hist_cap) { + alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p)); + p = kvrealloc(env->insn_hist, alloc_size, GFP_USER); + if (!p) + return -ENOMEM; + env->insn_hist = p; + env->insn_hist_cap = alloc_size / sizeof(*p); + } - p = &cur->jmp_history[cnt - 1]; + p = &env->insn_hist[cur->insn_hist_end]; p->idx = env->insn_idx; p->prev_idx = env->prev_insn_idx; p->flags = insn_flags; p->linked_regs = linked_regs; - cur->jmp_history_cnt = cnt; + + cur->insn_hist_end++; env->cur_hist_ent = p; return 0; } -static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st, - u32 hist_end, int insn_idx) +static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *env, + u32 hist_start, u32 hist_end, int insn_idx) { - if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) - return &st->jmp_history[hist_end - 1]; + if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx) + return &env->insn_hist[hist_end - 1]; return NULL; } @@ -3537,25 +3608,26 @@ static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_stat * history entry recording a jump from last instruction of parent state and * first 
instruction of given state. */ -static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, - u32 *history) +static int get_prev_insn_idx(const struct bpf_verifier_env *env, + struct bpf_verifier_state *st, + int insn_idx, u32 hist_start, u32 *hist_endp) { - u32 cnt = *history; + u32 hist_end = *hist_endp; + u32 cnt = hist_end - hist_start; - if (i == st->first_insn_idx) { + if (insn_idx == st->first_insn_idx) { if (cnt == 0) return -ENOENT; - if (cnt == 1 && st->jmp_history[0].idx == i) + if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx) return -ENOENT; } - if (cnt && st->jmp_history[cnt - 1].idx == i) { - i = st->jmp_history[cnt - 1].prev_idx; - (*history)--; + if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) { + (*hist_endp)--; + return env->insn_hist[hist_end - 1].prev_idx; } else { - i--; + return insn_idx - 1; } - return i; } static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) @@ -3727,7 +3799,7 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) /* If any register R in hist->linked_regs is marked as precise in bt, * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs. */ -static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist) +static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_insn_hist_entry *hist) { struct linked_regs linked_regs; bool some_precise = false; @@ -3772,7 +3844,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); * - *was* processed previously during backtracking. */ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, - struct bpf_jmp_history_entry *hist, struct backtrack_state *bt) + struct bpf_insn_hist_entry *hist, struct backtrack_state *bt) { const struct bpf_insn_cbs cbs = { .cb_call = disasm_kfunc_name, @@ -4191,7 +4263,7 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_ * SCALARS, as well as any other registers and slots that contribute to * a tracked state of given registers/stack slots, depending on specific BPF * assembly instructions (see backtrack_insns() for exact instruction handling - * logic). This backtracking relies on recorded jmp_history and is able to + * logic). This backtracking relies on recorded insn_hist and is able to * traverse entire chain of parent states. This process ends only when all the * necessary registers/slots and their transitive dependencies are marked as * precise. 
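The jmp_history to insn_hist conversion in the surrounding hunks replaces per-state history copies with a single growable array owned by the verifier environment, where each state only tracks its [insn_hist_start, insn_hist_end) slice. Below is a toy model of that bookkeeping, with illustrative names and plain realloc() standing in for the kvrealloc() growth above; it is a sketch of the indexing idea, not the kernel structures themselves.

    /* Toy model of the insn_hist layout: one array grown on demand, while
     * each state keeps only start/end indices into it.
     */
    #include <stdlib.h>

    struct hist_entry { int idx, prev_idx; };

    struct toy_env   { struct hist_entry *hist; size_t cap; };
    struct toy_state { size_t hist_start, hist_end; };

    static int push_entry(struct toy_env *env, struct toy_state *st,
                          int idx, int prev_idx)
    {
            if (st->hist_end + 1 > env->cap) {
                    size_t ncap = env->cap ? 2 * env->cap : 64;
                    void *p = realloc(env->hist, ncap * sizeof(*env->hist));

                    if (!p)
                            return -1;
                    env->hist = p;
                    env->cap = ncap;
            }
            env->hist[st->hist_end++] = (struct hist_entry){ idx, prev_idx };
            return 0;
    }

    /* A new exploration branch can start its slice where the current one
     * ends, the same chaining push_async_cb() sets up for async callback
     * states; backtracking then only needs the shared array plus each
     * state's index range instead of a private copy.
     */
    static void fork_state(const struct toy_state *parent, struct toy_state *child)
    {
            child->hist_start = parent->hist_end;
            child->hist_end   = child->hist_start;
    }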
@@ -4308,8 +4380,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) for (;;) { DECLARE_BITMAP(mask, 64); - u32 history = st->jmp_history_cnt; - struct bpf_jmp_history_entry *hist; + u32 hist_start = st->insn_hist_start; + u32 hist_end = st->insn_hist_end; + struct bpf_insn_hist_entry *hist; if (env->log.level & BPF_LOG_LEVEL2) { verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", @@ -4348,7 +4421,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) err = 0; skip_first = false; } else { - hist = get_jmp_hist_entry(st, history, i); + hist = get_insn_hist_entry(env, hist_start, hist_end, i); err = backtrack_insn(env, i, subseq_idx, hist, bt); } if (err == -ENOTSUPP) { @@ -4365,7 +4438,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) */ return 0; subseq_idx = i; - i = get_prev_insn_idx(st, i, &history); + i = get_prev_insn_idx(env, st, i, hist_start, &hist_end); if (i == -ENOENT) break; if (i >= env->prog->len) { @@ -4731,7 +4804,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags, 0); + return push_insn_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -5038,7 +5111,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, insn_flags = 0; /* we are not restoring spilled register */ } if (insn_flags) - return push_jmp_history(env, env->cur_state, insn_flags, 0); + return push_insn_history(env, env->cur_state, insn_flags, 0); return 0; } @@ -5423,7 +5496,7 @@ static bool in_sleepable(struct bpf_verifier_env *env) static bool in_rcu_cs(struct bpf_verifier_env *env) { return env->cur_state->active_rcu_lock || - env->cur_state->active_lock.ptr || + cur_func(env)->active_locks || !in_sleepable(env); } @@ -5491,6 +5564,22 @@ static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr return ret; } +static int mark_uptr_ld_reg(struct bpf_verifier_env *env, u32 regno, + struct btf_field *field) +{ + struct bpf_reg_state *reg; + const struct btf_type *t; + + t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); + mark_reg_known_zero(env, cur_regs(env), regno); + reg = reg_state(env, regno); + reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; + reg->mem_size = t->size; + reg->id = ++env->id_gen; + + return 0; +} + static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, int value_regno, int insn_idx, struct btf_field *kptr_field) @@ -5519,9 +5608,15 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, verbose(env, "store to referenced kptr disallowed\n"); return -EACCES; } + if (class != BPF_LDX && kptr_field->type == BPF_UPTR) { + verbose(env, "store to uptr disallowed\n"); + return -EACCES; + } if (class == BPF_LDX) { - val_reg = reg_state(env, value_regno); + if (kptr_field->type == BPF_UPTR) + return mark_uptr_ld_reg(env, value_regno, kptr_field); + /* We can simply mark the value_regno receiving the pointer * value from map as PTR_TO_BTF_ID, with the correct type. 
*/ @@ -5579,21 +5674,26 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: if (src != ACCESS_DIRECT) { - verbose(env, "kptr cannot be accessed indirectly by helper\n"); + verbose(env, "%s cannot be accessed indirectly by helper\n", + btf_field_type_name(field->type)); return -EACCES; } if (!tnum_is_const(reg->var_off)) { - verbose(env, "kptr access cannot have variable offset\n"); + verbose(env, "%s access cannot have variable offset\n", + btf_field_type_name(field->type)); return -EACCES; } if (p != off + reg->var_off.value) { - verbose(env, "kptr access misaligned expected=%u off=%llu\n", + verbose(env, "%s access misaligned expected=%u off=%llu\n", + btf_field_type_name(field->type), p, off + reg->var_off.value); return -EACCES; } if (size != bpf_size_to_bytes(BPF_DW)) { - verbose(env, "kptr access size must be BPF_DW\n"); + verbose(env, "%s access size must be BPF_DW\n", + btf_field_type_name(field->type)); return -EACCES; } break; @@ -5988,6 +6088,34 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, strict); } +static enum priv_stack_mode bpf_enable_priv_stack(struct bpf_prog *prog) +{ + if (!bpf_jit_supports_private_stack()) + return NO_PRIV_STACK; + + /* bpf_prog_check_recur() checks all prog types that use bpf trampoline + * while kprobe/tp/perf_event/raw_tp don't use trampoline hence checked + * explicitly. + */ + switch (prog->type) { + case BPF_PROG_TYPE_KPROBE: + case BPF_PROG_TYPE_TRACEPOINT: + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_RAW_TRACEPOINT: + return PRIV_STACK_ADAPTIVE; + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_STRUCT_OPS: + if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog)) + return PRIV_STACK_ADAPTIVE; + fallthrough; + default: + break; + } + + return NO_PRIV_STACK; +} + static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth) { if (env->prog->jit_requested) @@ -6005,17 +6133,20 @@ static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth) * Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ -static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) +static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx, + bool priv_stack_supported) { struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; - int depth = 0, frame = 0, i, subprog_end; + int depth = 0, frame = 0, i, subprog_end, subprog_depth; bool tail_call_reachable = false; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; int j; i = subprog[idx].start; + if (!priv_stack_supported) + subprog[idx].priv_stack_mode = NO_PRIV_STACK; process_func: /* protect against potential stack overflow that might happen when * bpf2bpf calls get combined with tailcalls. Limit the caller's stack @@ -6042,11 +6173,31 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) depth); return -EACCES; } - depth += round_up_stack_depth(env, subprog[idx].stack_depth); - if (depth > MAX_BPF_STACK) { - verbose(env, "combined stack size of %d calls is %d. Too large\n", - frame + 1, depth); - return -EACCES; + + subprog_depth = round_up_stack_depth(env, subprog[idx].stack_depth); + if (priv_stack_supported) { + /* Request private stack support only if the subprog stack + * depth is no less than BPF_PRIV_STACK_MIN_SIZE. 
This is to + * avoid jit penalty if the stack usage is small. + */ + if (subprog[idx].priv_stack_mode == PRIV_STACK_UNKNOWN && + subprog_depth >= BPF_PRIV_STACK_MIN_SIZE) + subprog[idx].priv_stack_mode = PRIV_STACK_ADAPTIVE; + } + + if (subprog[idx].priv_stack_mode == PRIV_STACK_ADAPTIVE) { + if (subprog_depth > MAX_BPF_STACK) { + verbose(env, "stack size of subprog %d is %d. Too large\n", + idx, subprog_depth); + return -EACCES; + } + } else { + depth += subprog_depth; + if (depth > MAX_BPF_STACK) { + verbose(env, "combined stack size of %d calls is %d. Too large\n", + frame + 1, depth); + return -EACCES; + } } continue_func: subprog_end = subprog[idx + 1].start; @@ -6103,6 +6254,8 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) } i = next_insn; idx = sidx; + if (!priv_stack_supported) + subprog[idx].priv_stack_mode = NO_PRIV_STACK; if (subprog[idx].has_tail_call) tail_call_reachable = true; @@ -6136,7 +6289,8 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) */ if (frame == 0) return 0; - depth -= round_up_stack_depth(env, subprog[idx].stack_depth); + if (subprog[idx].priv_stack_mode != PRIV_STACK_ADAPTIVE) + depth -= round_up_stack_depth(env, subprog[idx].stack_depth); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; @@ -6145,17 +6299,45 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) static int check_max_stack_depth(struct bpf_verifier_env *env) { + enum priv_stack_mode priv_stack_mode = PRIV_STACK_UNKNOWN; struct bpf_subprog_info *si = env->subprog_info; + bool priv_stack_supported; int ret; for (int i = 0; i < env->subprog_cnt; i++) { + if (si[i].has_tail_call) { + priv_stack_mode = NO_PRIV_STACK; + break; + } + } + + if (priv_stack_mode == PRIV_STACK_UNKNOWN) + priv_stack_mode = bpf_enable_priv_stack(env->prog); + + /* All async_cb subprogs use normal kernel stack. If a particular + * subprog appears in both main prog and async_cb subtree, that + * subprog will use normal kernel stack to avoid potential nesting. + * The reverse subprog traversal ensures when main prog subtree is + * checked, the subprogs appearing in async_cb subtrees are already + * marked as using normal kernel stack, so stack size checking can + * be done properly. + */ + for (int i = env->subprog_cnt - 1; i >= 0; i--) { if (!i || si[i].is_async_cb) { - ret = check_max_stack_depth_subprog(env, i); + priv_stack_supported = !i && priv_stack_mode == PRIV_STACK_ADAPTIVE; + ret = check_max_stack_depth_subprog(env, i, priv_stack_supported); if (ret < 0) return ret; } - continue; } + + for (int i = 0; i < env->subprog_cnt; i++) { + if (si[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) { + env->prog->aux->jits_use_priv_stack = true; + break; + } + } + return 0; } @@ -6595,6 +6777,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, const char *field_name = NULL; enum bpf_type_flag flag = 0; u32 btf_id = 0; + bool mask; int ret; if (!env->allow_ptr_leaks) { @@ -6666,7 +6849,21 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (ret < 0) return ret; - + /* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL + * trusted PTR_TO_BTF_ID, these are the ones that are possibly + * arguments to the raw_tp. Since internal checks in for trusted + * reg in check_ptr_to_btf_access would consider PTR_MAYBE_NULL + * modifier as problematic, mask it out temporarily for the + * check. Don't apply this to pointers with ref_obj_id > 0, as + * those won't be raw_tp args. 
+ * + * We may end up applying this relaxation to other trusted + * PTR_TO_BTF_ID with maybe null flag, since we cannot + * distinguish PTR_MAYBE_NULL tagged for arguments vs normal + * tagging, but that should expand allowed behavior, and not + * cause regression for existing behavior. + */ + mask = mask_raw_tp_reg(env, reg); if (ret != PTR_TO_BTF_ID) { /* just mark; */ @@ -6727,8 +6924,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, clear_trusted_flags(&flag); } - if (atype == BPF_READ && value_regno >= 0) + if (atype == BPF_READ && value_regno >= 0) { mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); + /* We've assigned a new type to regno, so don't undo masking. */ + if (regno == value_regno) + mask = false; + } + unmask_raw_tp_reg(reg, mask); return 0; } @@ -6949,7 +7151,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return err; if (tnum_is_const(reg->var_off)) kptr_field = btf_record_find(reg->map_ptr->record, - off + reg->var_off.value, BPF_KPTR); + off + reg->var_off.value, BPF_KPTR | BPF_UPTR); if (kptr_field) { err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field); } else if (t == BPF_READ && value_regno >= 0) { @@ -7103,7 +7305,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (base_type(reg->type) == PTR_TO_BTF_ID && - !type_may_be_null(reg->type)) { + (mask_raw_tp_reg_cond(env, reg) || !type_may_be_null(reg->type))) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); } else if (reg->type == CONST_PTR_TO_MAP) { @@ -7648,19 +7850,20 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg * Since only one bpf_spin_lock is allowed the checks are simpler than * reg_is_refcounted() logic. The verifier needs to remember only * one spin_lock instead of array of acquired_refs. - * cur_state->active_lock remembers which map value element or allocated + * cur_func(env)->active_locks remembers which map value element or allocated * object got locked and clears it after bpf_spin_unlock. 
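+ * The held lock is now tracked as a REF_TYPE_LOCK entry in the function's
+ * reference state, so lock and unlock go through acquire_lock_state() and
+ * release_lock_state() instead of a dedicated active_lock field.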
*/ static int process_spin_lock(struct bpf_verifier_env *env, int regno, bool is_lock) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; - struct bpf_verifier_state *cur = env->cur_state; bool is_const = tnum_is_const(reg->var_off); + struct bpf_func_state *cur = cur_func(env); u64 val = reg->var_off.value; struct bpf_map *map = NULL; struct btf *btf = NULL; struct btf_record *rec; + int err; if (!is_const) { verbose(env, @@ -7692,16 +7895,23 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, return -EINVAL; } if (is_lock) { - if (cur->active_lock.ptr) { + void *ptr; + + if (map) + ptr = map; + else + ptr = btf; + + if (cur->active_locks) { verbose(env, "Locking two bpf_spin_locks are not allowed\n"); return -EINVAL; } - if (map) - cur->active_lock.ptr = map; - else - cur->active_lock.ptr = btf; - cur->active_lock.id = reg->id; + err = acquire_lock_state(env, env->insn_idx, REF_TYPE_LOCK, reg->id, ptr); + if (err < 0) { + verbose(env, "Failed to acquire lock state\n"); + return err; + } } else { void *ptr; @@ -7710,20 +7920,17 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, else ptr = btf; - if (!cur->active_lock.ptr) { + if (!cur->active_locks) { verbose(env, "bpf_spin_unlock without taking a lock\n"); return -EINVAL; } - if (cur->active_lock.ptr != ptr || - cur->active_lock.id != reg->id) { + + if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) { verbose(env, "bpf_spin_unlock of different lock\n"); return -EINVAL; } invalidate_non_owning_refs(env); - - cur->active_lock.ptr = NULL; - cur->active_lock.id = 0; } return 0; } @@ -8796,6 +9003,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, enum bpf_reg_type type = reg->type; u32 *arg_btf_id = NULL; int err = 0; + bool mask; if (arg_type == ARG_DONTCARE) return 0; @@ -8836,11 +9044,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK) arg_btf_id = fn->arg_btf_id[arg]; + mask = mask_raw_tp_reg(env, reg); err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); - if (err) - return err; - err = check_func_arg_reg_off(env, reg, regno, arg_type); + err = err ?: check_func_arg_reg_off(env, reg, regno, arg_type); + unmask_raw_tp_reg(reg, mask); if (err) return err; @@ -9635,14 +9843,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, return ret; } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { struct bpf_call_arg_meta meta; + bool mask; int err; if (register_is_null(reg) && type_may_be_null(arg->arg_type)) continue; memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */ + mask = mask_raw_tp_reg(env, reg); err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta); err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type); + unmask_raw_tp_reg(reg, mask); if (err) return err; } else { @@ -9781,7 +9992,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, const char *sub_name = subprog_name(env, subprog); /* Only global subprogs cannot be called with a lock held. 
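 * Static subprogs are verified together with their caller, so a held lock
 * can be tracked across the call; global subprogs are verified independently.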
*/ - if (env->cur_state->active_lock.ptr) { + if (cur_func(env)->active_locks) { verbose(env, "global function calls are not allowed while holding a lock,\n" "use static function instead\n"); return -EINVAL; @@ -9910,7 +10121,7 @@ static int set_loop_callback_state(struct bpf_verifier_env *env, { /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, * u64 flags); - * callback_fn(u32 index, void *callback_ctx); + * callback_fn(u64 index, void *callback_ctx); */ callee->regs[BPF_REG_1].type = SCALAR_VALUE; callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; @@ -10122,17 +10333,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) caller->regs[BPF_REG_0] = *r0; } - /* callback_fn frame should have released its own additions to parent's - * reference state at this point, or check_reference_leak would - * complain, hence it must be the same as the caller. There is no need - * to copy it back. - */ - if (!callee->in_callback_fn) { - /* Transfer references to the caller */ - err = copy_reference_state(caller, callee); - if (err) - return err; - } + /* Transfer references to the caller */ + err = copy_reference_state(caller, callee); + if (err) + return err; /* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite, * there function call logic would reschedule callback visit. If iteration @@ -10302,11 +10506,11 @@ static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exi bool refs_lingering = false; int i; - if (!exception_exit && state->frameno && !state->in_callback_fn) + if (!exception_exit && state->frameno) return 0; for (i = 0; i < state->acquired_refs; i++) { - if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno) + if (state->refs[i].type != REF_TYPE_PTR) continue; verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", state->refs[i].id, state->refs[i].insn_idx); @@ -10315,6 +10519,34 @@ static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exi return refs_lingering ? 
-EINVAL : 0; } +static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit, bool check_lock, const char *prefix) +{ + int err; + + if (check_lock && cur_func(env)->active_locks) { + verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix); + return -EINVAL; + } + + err = check_reference_leak(env, exception_exit); + if (err) { + verbose(env, "%s would lead to reference leak\n", prefix); + return err; + } + + if (check_lock && env->cur_state->active_rcu_lock) { + verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix); + return -EINVAL; + } + + if (check_lock && env->cur_state->active_preempt_lock) { + verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix); + return -EINVAL; + } + + return 0; +} + static int check_bpf_snprintf_call(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { @@ -10583,11 +10815,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn switch (func_id) { case BPF_FUNC_tail_call: - err = check_reference_leak(env, false); - if (err) { - verbose(env, "tail_call would lead to reference leak\n"); + err = check_resource_leak(env, false, true, "tail_call"); + if (err) return err; - } break; case BPF_FUNC_get_local_storage: /* check that flags argument in get_local_storage(map, flags) is 0, @@ -11252,6 +11482,7 @@ enum special_kfunc_type { KF_bpf_preempt_enable, KF_bpf_iter_css_task_new, KF_bpf_session_cookie, + KF_bpf_get_kmem_cache, }; BTF_SET_START(special_kfunc_set) @@ -11317,6 +11548,7 @@ BTF_ID(func, bpf_session_cookie) #else BTF_ID_UNUSED #endif +BTF_ID(func, bpf_get_kmem_cache) static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { @@ -11512,10 +11744,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { - struct bpf_verifier_state *state = env->cur_state; struct btf_record *rec = reg_btf_record(reg); - if (!state->active_lock.ptr) { + if (!cur_func(env)->active_locks) { verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); return -EFAULT; } @@ -11612,6 +11843,7 @@ static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_o */ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) { + struct bpf_reference_state *s; void *ptr; u32 id; @@ -11628,10 +11860,10 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_ } id = reg->id; - if (!env->cur_state->active_lock.ptr) + if (!cur_func(env)->active_locks) return -EINVAL; - if (env->cur_state->active_lock.ptr != ptr || - env->cur_state->active_lock.id != id) { + s = find_lock_state(env, REF_TYPE_LOCK, id, ptr); + if (!s) { verbose(env, "held lock and object are not in the same allocation\n"); return -EINVAL; } @@ -11942,6 +12174,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ enum bpf_arg_type arg_type = ARG_DONTCARE; u32 regno = i + 1, ref_id, type_size; bool is_ret_buf_sz = false; + bool mask = false; int kf_arg_type; t = btf_type_skip_modifiers(btf, args[i].type, NULL); @@ -12000,12 +12233,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return -EINVAL; } + mask = mask_raw_tp_reg(env, reg); if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && (register_is_null(reg) || type_may_be_null(reg->type)) && !is_kfunc_arg_nullable(meta->btf, &args[i])) { verbose(env, "Possibly NULL pointer 
passed to trusted arg%d\n", i); + unmask_raw_tp_reg(reg, mask); return -EACCES; } + unmask_raw_tp_reg(reg, mask); if (reg->ref_obj_id) { if (is_kfunc_release(meta) && meta->ref_obj_id) { @@ -12063,16 +12299,24 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) break; + /* Allow passing maybe NULL raw_tp arguments to + * kfuncs for compatibility. Don't apply this to + * arguments with ref_obj_id > 0. + */ + mask = mask_raw_tp_reg(env, reg); if (!is_trusted_reg(reg)) { if (!is_kfunc_rcu(meta)) { verbose(env, "R%d must be referenced or trusted\n", regno); + unmask_raw_tp_reg(reg, mask); return -EINVAL; } if (!is_rcu_reg(reg)) { verbose(env, "R%d must be a rcu pointer\n", regno); + unmask_raw_tp_reg(reg, mask); return -EINVAL; } } + unmask_raw_tp_reg(reg, mask); fallthrough; case KF_ARG_PTR_TO_CTX: case KF_ARG_PTR_TO_DYNPTR: @@ -12095,7 +12339,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ if (is_kfunc_release(meta) && reg->ref_obj_id) arg_type |= OBJ_RELEASE; + mask = mask_raw_tp_reg(env, reg); ret = check_func_arg_reg_off(env, reg, regno, arg_type); + unmask_raw_tp_reg(reg, mask); if (ret < 0) return ret; @@ -12272,6 +12518,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ ref_tname = btf_name_by_offset(btf, ref_t->name_off); fallthrough; case KF_ARG_PTR_TO_BTF_ID: + mask = mask_raw_tp_reg(env, reg); /* Only base_type is checked, further checks are done here */ if ((base_type(reg->type) != PTR_TO_BTF_ID || (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && @@ -12280,9 +12527,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ verbose(env, "expected %s or socket\n", reg_type_str(env, base_type(reg->type) | (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); + unmask_raw_tp_reg(reg, mask); return -EINVAL; } ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i); + unmask_raw_tp_reg(reg, mask); if (ret < 0) return ret; break; @@ -12827,6 +13076,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, regs[BPF_REG_0].type = PTR_TO_BTF_ID; regs[BPF_REG_0].btf_id = ptr_type_id; + if (meta.func_id == special_kfunc_list[KF_bpf_get_kmem_cache]) + regs[BPF_REG_0].type |= PTR_UNTRUSTED; + if (is_iter_next_kfunc(&meta)) { struct bpf_reg_state *cur_iter; @@ -13252,7 +13504,7 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env, */ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, - const struct bpf_reg_state *ptr_reg, + struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; @@ -13266,6 +13518,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_sanitize_info info = {}; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; + bool mask; int ret; dst_reg = ®s[dst]; @@ -13292,11 +13545,14 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } + mask = mask_raw_tp_reg(env, ptr_reg); if (ptr_reg->type & PTR_MAYBE_NULL) { verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", dst, reg_type_str(env, ptr_reg->type)); + unmask_raw_tp_reg(ptr_reg, mask); return -EACCES; } + unmask_raw_tp_reg(ptr_reg, mask); switch (base_type(ptr_reg->type)) { case PTR_TO_CTX: @@ -15480,7 +15736,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, if 
(dst_reg->type == SCALAR_VALUE && dst_reg->id) collect_linked_regs(this_branch, dst_reg->id, &linked_regs); if (linked_regs.cnt > 1) { - err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); + err = push_insn_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); if (err) return err; } @@ -15744,26 +16000,9 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) * gen_ld_abs() may terminate the program at runtime, leading to * reference leak. */ - err = check_reference_leak(env, false); - if (err) { - verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); + err = check_resource_leak(env, false, true, "BPF_LD_[ABS|IND]"); + if (err) return err; - } - - if (env->cur_state->active_lock.ptr) { - verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); - return -EINVAL; - } - - if (env->cur_state->active_rcu_lock) { - verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); - return -EINVAL; - } - - if (env->cur_state->active_preempt_lock) { - verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_preempt_disable-ed region\n"); - return -EINVAL; - } if (regs[ctx_reg].type != PTR_TO_CTX) { verbose(env, @@ -15909,6 +16148,16 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char return -ENOTSUPP; } break; + case BPF_PROG_TYPE_KPROBE: + switch (env->prog->expected_attach_type) { + case BPF_TRACE_KPROBE_SESSION: + case BPF_TRACE_UPROBE_SESSION: + range = retval_range(0, 1); + break; + default: + return 0; + } + break; case BPF_PROG_TYPE_SK_LOOKUP: range = retval_range(SK_DROP, SK_PASS); break; @@ -16175,10 +16424,7 @@ static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta) /* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */ static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta) { - if (meta->btf == btf_vmlinux) - return meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || - meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]; - return false; + return meta->kfunc_flags & KF_FASTCALL; } /* LLVM define a bpf_fastcall function attribute. @@ -17513,8 +17759,20 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, return false; for (i = 0; i < old->acquired_refs; i++) { - if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) + if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) || + old->refs[i].type != cur->refs[i].type) return false; + switch (old->refs[i].type) { + case REF_TYPE_PTR: + break; + case REF_TYPE_LOCK: + if (old->refs[i].ptr != cur->refs[i].ptr) + return false; + break; + default: + WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type); + return false; + } } return true; @@ -17592,19 +17850,6 @@ static bool states_equal(struct bpf_verifier_env *env, if (old->speculative && !cur->speculative) return false; - if (old->active_lock.ptr != cur->active_lock.ptr) - return false; - - /* Old and cur active_lock's have to be either both present - * or both absent. 
- */ - if (!!old->active_lock.id != !!cur->active_lock.id) - return false; - - if (old->active_lock.id && - !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) - return false; - if (old->active_rcu_lock != cur->active_rcu_lock) return false; @@ -17880,7 +18125,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || /* Avoid accumulating infinitely long jmp history */ - cur->jmp_history_cnt > 40; + cur->insn_hist_end - cur->insn_hist_start > 40; /* bpf progs typically have pruning point every 4 instructions * http://vger.kernel.org/bpfconf2019.html#session-1 @@ -18078,7 +18323,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * the current state. */ if (is_jmp_point(env, env->insn_idx)) - err = err ? : push_jmp_history(env, cur, 0, 0); + err = err ? : push_insn_history(env, cur, 0, 0); err = err ? : propagate_precision(env, &sl->state); if (err) return err; @@ -18177,8 +18422,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) cur->parent = new; cur->first_insn_idx = insn_idx; + cur->insn_hist_start = cur->insn_hist_end; cur->dfs_depth = new->dfs_depth + 1; - clear_jmp_history(cur); new_sl->next = *explored_state(env, insn_idx); *explored_state(env, insn_idx) = new_sl; /* connect new state to parentage chain. Current frame needs all @@ -18346,7 +18591,7 @@ static int do_check(struct bpf_verifier_env *env) } if (is_jmp_point(env, env->insn_idx)) { - err = push_jmp_history(env, state, 0, 0); + err = push_insn_history(env, state, 0, 0); if (err) return err; } @@ -18506,7 +18751,7 @@ static int do_check(struct bpf_verifier_env *env) return -EINVAL; } - if (env->cur_state->active_lock.ptr) { + if (cur_func(env)->active_locks) { if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { @@ -18555,30 +18800,14 @@ static int do_check(struct bpf_verifier_env *env) return -EINVAL; } process_bpf_exit_full: - if (env->cur_state->active_lock.ptr && !env->cur_state->curframe) { - verbose(env, "bpf_spin_unlock is missing\n"); - return -EINVAL; - } - - if (env->cur_state->active_rcu_lock && !env->cur_state->curframe) { - verbose(env, "bpf_rcu_read_unlock is missing\n"); - return -EINVAL; - } - - if (env->cur_state->active_preempt_lock && !env->cur_state->curframe) { - verbose(env, "%d bpf_preempt_enable%s missing\n", - env->cur_state->active_preempt_lock, - env->cur_state->active_preempt_lock == 1 ? " is" : "(s) are"); - return -EINVAL; - } - /* We must do check_reference_leak here before * prepare_func_exit to handle the case when * state->curframe > 0, it may be a callback * function, for which reference_state must * match caller reference state when it exits. */ - err = check_reference_leak(env, exception_exit); + err = check_resource_leak(env, exception_exit, !env->cur_state->curframe, + "BPF_EXIT instruction"); if (err) return err; @@ -19837,6 +20066,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) * for this case. 
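+	 * The PTR_TRUSTED | PTR_MAYBE_NULL case covers possibly NULL raw_tp
+	 * arguments whose dereference is permitted by check_ptr_to_btf_access();
+	 * converting their loads to PROBE_MEM lets a NULL pointer fault
+	 * gracefully at runtime.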
*/ case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: + case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL: if (type == BPF_READ) { if (BPF_MODE(insn->code) == BPF_MEM) insn->code = BPF_LDX | BPF_PROBE_MEM | @@ -20041,6 +20271,9 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; + if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) + func[i]->aux->jits_use_priv_stack = true; + func[i]->jit_requested = 1; func[i]->blinding_requested = prog->blinding_requested; func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; @@ -21809,6 +22042,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } } + if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) { + verbose(env, "Private stack not supported by jit\n"); + return -EACCES; + } + /* btf_ctx_access() used this to provide argument type info */ prog->aux->ctx_arg_info = st_ops_desc->arg_info[member_idx].info; @@ -22547,6 +22785,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 if (!is_priv) mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); + kvfree(env->insn_hist); err_free_env: kvfree(env); return ret; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f86c78961708..949a3870946c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -802,6 +802,8 @@ struct send_signal_irq_work { struct task_struct *task; u32 sig; enum pid_type type; + bool has_siginfo; + struct kernel_siginfo info; }; static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); @@ -809,27 +811,46 @@ static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); static void do_bpf_send_signal(struct irq_work *entry) { struct send_signal_irq_work *work; + struct kernel_siginfo *siginfo; work = container_of(entry, struct send_signal_irq_work, irq_work); - group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); + siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV; + + group_send_sig_info(work->sig, siginfo, work->task, work->type); put_task_struct(work->task); } -static int bpf_send_signal_common(u32 sig, enum pid_type type) +static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value) { struct send_signal_irq_work *work = NULL; + struct kernel_siginfo info; + struct kernel_siginfo *siginfo; + + if (!task) { + task = current; + siginfo = SEND_SIG_PRIV; + } else { + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + info.si_value.sival_ptr = (void *)(unsigned long)value; + siginfo = &info; + } /* Similar to bpf_probe_write_user, task needs to be * in a sound condition and kernel memory access be * permitted in order to send signal to the current * task. */ - if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) + if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING))) return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; /* Task should not be pid=1 to avoid kernel panic. */ - if (unlikely(is_global_init(current))) + if (unlikely(is_global_init(task))) return -EPERM; if (irqs_disabled()) { @@ -847,19 +868,22 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) * to the irq_work. The current task may change when queued * irq works get executed. 
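+	 * When a target task is passed in explicitly, the reference pins that
+	 * task rather than current, and the constructed siginfo (carrying the
+	 * caller-supplied value in si_value) is copied into the irq_work so
+	 * do_bpf_send_signal() can deliver it.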
*/ - work->task = get_task_struct(current); + work->task = get_task_struct(task); + work->has_siginfo = siginfo == &info; + if (work->has_siginfo) + copy_siginfo(&work->info, &info); work->sig = sig; work->type = type; irq_work_queue(&work->irq_work); return 0; } - return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); + return group_send_sig_info(sig, siginfo, task, type); } BPF_CALL_1(bpf_send_signal, u32, sig) { - return bpf_send_signal_common(sig, PIDTYPE_TGID); + return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0); } static const struct bpf_func_proto bpf_send_signal_proto = { @@ -871,7 +895,7 @@ static const struct bpf_func_proto bpf_send_signal_proto = { BPF_CALL_1(bpf_send_signal_thread, u32, sig) { - return bpf_send_signal_common(sig, PIDTYPE_PID); + return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0); } static const struct bpf_func_proto bpf_send_signal_thread_proto = { @@ -1557,6 +1581,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog) return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; } +static inline bool is_uprobe_multi(const struct bpf_prog *prog) +{ + return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI || + prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; +} + +static inline bool is_uprobe_session(const struct bpf_prog *prog) +{ + return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; +} + static const struct bpf_func_proto * kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1574,13 +1609,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_func_ip: if (is_kprobe_multi(prog)) return &bpf_get_func_ip_proto_kprobe_multi; - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + if (is_uprobe_multi(prog)) return &bpf_get_func_ip_proto_uprobe_multi; return &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: if (is_kprobe_multi(prog)) return &bpf_get_attach_cookie_proto_kmulti; - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + if (is_uprobe_multi(prog)) return &bpf_get_attach_cookie_proto_umulti; return &bpf_get_attach_cookie_proto_trace; default: @@ -3072,6 +3107,7 @@ struct bpf_uprobe { u64 cookie; struct uprobe *uprobe; struct uprobe_consumer consumer; + bool session; }; struct bpf_uprobe_multi_link { @@ -3084,7 +3120,7 @@ struct bpf_uprobe_multi_link { }; struct bpf_uprobe_multi_run_ctx { - struct bpf_run_ctx run_ctx; + struct bpf_session_run_ctx session_ctx; unsigned long entry_ip; struct bpf_uprobe *uprobe; }; @@ -3195,17 +3231,22 @@ static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { static int uprobe_prog_run(struct bpf_uprobe *uprobe, unsigned long entry_ip, - struct pt_regs *regs) + struct pt_regs *regs, + bool is_return, void *data) { struct bpf_uprobe_multi_link *link = uprobe->link; struct bpf_uprobe_multi_run_ctx run_ctx = { + .session_ctx = { + .is_return = is_return, + .data = data, + }, .entry_ip = entry_ip, .uprobe = uprobe, }; struct bpf_prog *prog = link->link.prog; bool sleepable = prog->sleepable; struct bpf_run_ctx *old_run_ctx; - int err = 0; + int err; if (link->task && !same_thread_group(current, link->task)) return 0; @@ -3217,7 +3258,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, migrate_disable(); - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx); err = bpf_prog_run(link->link.prog, regs); bpf_reset_run_ctx(old_run_ctx); @@ -3244,9 +3285,13 @@ uprobe_multi_link_handler(struct 
uprobe_consumer *con, struct pt_regs *regs, __u64 *data) { struct bpf_uprobe *uprobe; + int ret; uprobe = container_of(con, struct bpf_uprobe, consumer); - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data); + if (uprobe->session) + return ret ? UPROBE_HANDLER_IGNORE : 0; + return 0; } static int @@ -3256,14 +3301,16 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s struct bpf_uprobe *uprobe; uprobe = container_of(con, struct bpf_uprobe, consumer); - return uprobe_prog_run(uprobe, func, regs); + uprobe_prog_run(uprobe, func, regs, true, data); + return 0; } static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) { struct bpf_uprobe_multi_run_ctx *run_ctx; - run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, + session_ctx.run_ctx); return run_ctx->entry_ip; } @@ -3271,7 +3318,8 @@ static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) { struct bpf_uprobe_multi_run_ctx *run_ctx; - run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, + session_ctx.run_ctx); return run_ctx->uprobe->cookie; } @@ -3295,7 +3343,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (sizeof(u64) != sizeof(void *)) return -EOPNOTSUPP; - if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) + if (!is_uprobe_multi(prog)) return -EINVAL; flags = attr->link_create.uprobe_multi.flags; @@ -3371,11 +3419,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr uprobes[i].link = link; - if (flags & BPF_F_UPROBE_MULTI_RETURN) - uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; - else + if (!(flags & BPF_F_UPROBE_MULTI_RETURN)) uprobes[i].consumer.handler = uprobe_multi_link_handler; - + if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog)) + uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; + if (is_uprobe_session(prog)) + uprobes[i].session = true; if (pid) uprobes[i].consumer.filter = uprobe_multi_link_filter; } @@ -3464,7 +3513,7 @@ static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id) if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id)) return 0; - if (!is_kprobe_session(prog)) + if (!is_kprobe_session(prog) && !is_uprobe_session(prog)) return -EACCES; return 0; @@ -3482,3 +3531,16 @@ static int __init bpf_kprobe_multi_kfuncs_init(void) } late_initcall(bpf_kprobe_multi_kfuncs_init); + +__bpf_kfunc_start_defs(); + +__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, + u64 value) +{ + if (type != PIDTYPE_PID && type != PIDTYPE_TGID) + return -EINVAL; + + return bpf_send_signal_common(sig, type, task, value); +} + +__bpf_kfunc_end_defs(); diff --git a/mm/slab_common.c b/mm/slab_common.c index 893d32059915..a7174455db9f 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1332,6 +1332,25 @@ size_t ksize(const void *objp) } EXPORT_SYMBOL(ksize); +#ifdef CONFIG_BPF_SYSCALL +#include + +__bpf_kfunc_start_defs(); + +__bpf_kfunc struct kmem_cache *bpf_get_kmem_cache(u64 addr) +{ + struct slab *slab; + + if (!virt_addr_valid((void *)(long)addr)) + return NULL; + + slab = virt_to_slab((void *)(long)addr); + return slab ? 
slab->slab_cache : NULL; +} + +__bpf_kfunc_end_defs(); +#endif /* CONFIG_BPF_SYSCALL */ + /* Tracepoints definitions. */ EXPORT_TRACEPOINT_SYMBOL(kmalloc); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index bc01b3aa6b0f..2f4ed83a75ae 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -106,7 +106,7 @@ static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key, if (sock) { sdata = bpf_local_storage_update( sock->sk, (struct bpf_local_storage_map *)map, value, - map_flags, GFP_ATOMIC); + map_flags, false, GFP_ATOMIC); sockfd_put(sock); return PTR_ERR_OR_ZERO(sdata); } @@ -137,7 +137,7 @@ bpf_sk_storage_clone_elem(struct sock *newsk, { struct bpf_local_storage_elem *copy_selem; - copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC); + copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, false, GFP_ATOMIC); if (!copy_selem) return NULL; @@ -243,7 +243,7 @@ BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, refcount_inc_not_zero(&sk->sk_refcnt)) { sdata = bpf_local_storage_update( sk, (struct bpf_local_storage_map *)map, value, - BPF_NOEXIST, gfp_flags); + BPF_NOEXIST, false, gfp_flags); /* sk must be a fullsock (guaranteed by verifier), * so sock_gen_put() is unnecessary. */ diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 7afe040cf43b..bcf103a4c14f 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -17,20 +17,12 @@ tprogs-y += tracex3 tprogs-y += tracex4 tprogs-y += tracex5 tprogs-y += tracex6 -tprogs-y += tracex7 -tprogs-y += test_probe_write_user tprogs-y += trace_output tprogs-y += lathist tprogs-y += offwaketime tprogs-y += spintest tprogs-y += map_perf_test -tprogs-y += test_overhead -tprogs-y += test_cgrp2_array_pin -tprogs-y += test_cgrp2_attach -tprogs-y += test_cgrp2_sock -tprogs-y += test_cgrp2_sock2 tprogs-y += xdp_router_ipv4 -tprogs-y += test_current_task_under_cgroup tprogs-y += trace_event tprogs-y += sampleip tprogs-y += tc_l2_redirect @@ -66,20 +58,12 @@ tracex3-objs := tracex3_user.o tracex4-objs := tracex4_user.o tracex5-objs := tracex5_user.o $(TRACE_HELPERS) tracex6-objs := tracex6_user.o -tracex7-objs := tracex7_user.o -test_probe_write_user-objs := test_probe_write_user_user.o trace_output-objs := trace_output_user.o lathist-objs := lathist_user.o offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS) spintest-objs := spintest_user.o $(TRACE_HELPERS) map_perf_test-objs := map_perf_test_user.o test_overhead-objs := test_overhead_user.o -test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o -test_cgrp2_attach-objs := test_cgrp2_attach.o -test_cgrp2_sock-objs := test_cgrp2_sock.o -test_cgrp2_sock2-objs := test_cgrp2_sock2.o -test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \ - test_current_task_under_cgroup_user.o trace_event-objs := trace_event_user.o $(TRACE_HELPERS) sampleip-objs := sampleip_user.o $(TRACE_HELPERS) tc_l2_redirect-objs := tc_l2_redirect_user.o @@ -107,9 +91,6 @@ always-y += tracex3.bpf.o always-y += tracex4.bpf.o always-y += tracex5.bpf.o always-y += tracex6.bpf.o -always-y += tracex7.bpf.o -always-y += sock_flags.bpf.o -always-y += test_probe_write_user.bpf.o always-y += trace_output.bpf.o always-y += tcbpf1_kern.o always-y += tc_l2_redirect_kern.o @@ -117,12 +98,7 @@ always-y += lathist_kern.o always-y += offwaketime.bpf.o always-y += spintest.bpf.o always-y += map_perf_test.bpf.o -always-y += test_overhead_tp.bpf.o -always-y += test_overhead_raw_tp.bpf.o -always-y += 
test_overhead_kprobe.bpf.o always-y += parse_varlen.o parse_simple.o parse_ldabs.o -always-y += test_cgrp2_tc.bpf.o -always-y += test_current_task_under_cgroup.bpf.o always-y += trace_event_kern.o always-y += sampleip_kern.o always-y += lwt_len_hist.bpf.o @@ -195,7 +171,6 @@ TPROGLDLIBS_xdp_router_ipv4 += -lm -pthread TPROGLDLIBS_tracex4 += -lrt TPROGLDLIBS_trace_output += -lrt TPROGLDLIBS_map_perf_test += -lrt -TPROGLDLIBS_test_overhead += -lrt # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make M=samples/bpf LLC=~/git/llvm-project/llvm/build/bin/llc CLANG=~/git/llvm-project/llvm/build/bin/clang diff --git a/samples/bpf/sock_flags.bpf.c b/samples/bpf/sock_flags.bpf.c deleted file mode 100644 index 0da749f6a9e1..000000000000 --- a/samples/bpf/sock_flags.bpf.c +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include "vmlinux.h" -#include "net_shared.h" -#include - -SEC("cgroup/sock") -int bpf_prog1(struct bpf_sock *sk) -{ - char fmt[] = "socket: family %d type %d protocol %d\n"; - char fmt2[] = "socket: uid %u gid %u\n"; - __u64 gid_uid = bpf_get_current_uid_gid(); - __u32 uid = gid_uid & 0xffffffff; - __u32 gid = gid_uid >> 32; - - bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol); - bpf_trace_printk(fmt2, sizeof(fmt2), uid, gid); - - /* block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 sockets - * ie., make ping6 fail - */ - if (sk->family == AF_INET6 && - sk->type == SOCK_DGRAM && - sk->protocol == IPPROTO_ICMPV6) - return 0; - - return 1; -} - -SEC("cgroup/sock") -int bpf_prog2(struct bpf_sock *sk) -{ - char fmt[] = "socket: family %d type %d protocol %d\n"; - - bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol); - - /* block AF_INET, SOCK_DGRAM, IPPROTO_ICMP sockets - * ie., make ping fail - */ - if (sk->family == AF_INET && - sk->type == SOCK_DGRAM && - sk->protocol == IPPROTO_ICMP) - return 0; - - return 1; -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/syscall_nrs.c b/samples/bpf/syscall_nrs.c index 88f940052450..a6e600f3d477 100644 --- a/samples/bpf/syscall_nrs.c +++ b/samples/bpf/syscall_nrs.c @@ -2,6 +2,9 @@ #include #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-prototypes" + #define SYSNR(_NR) DEFINE(SYS ## _NR, _NR) void syscall_defines(void) @@ -17,3 +20,5 @@ void syscall_defines(void) #endif } + +#pragma GCC diagnostic pop diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c index fd2fa0004330..b19fa9b88fe0 100644 --- a/samples/bpf/tc_l2_redirect_kern.c +++ b/samples/bpf/tc_l2_redirect_kern.c @@ -58,14 +58,11 @@ static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) SEC("l2_to_iptun_ingress_forward") int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) { - struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void *)(long)skb->data_end; int key = 0, *ifindex; - int ret; - if (data + sizeof(*eth) > data_end) return TC_ACT_OK; @@ -115,8 +112,6 @@ int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) void *data_end = (void *)(long)skb->data_end; int key = 0, *ifindex; - int ret; - if (data + sizeof(*eth) > data_end) return TC_ACT_OK; @@ -205,7 +200,6 @@ int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) SEC("drop_non_tun_vip") int _drop_non_tun_vip(struct __sk_buff *skb) { - struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void 
*)(long)skb->data_end; diff --git a/samples/bpf/test_cgrp2_array_pin.c b/samples/bpf/test_cgrp2_array_pin.c deleted file mode 100644 index 05e88aa63009..000000000000 --- a/samples/bpf/test_cgrp2_array_pin.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016 Facebook - */ -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -static void usage(void) -{ - printf("Usage: test_cgrp2_array_pin [...]\n"); - printf(" -F File to pin an BPF cgroup array\n"); - printf(" -U Update an already pinned BPF cgroup array\n"); - printf(" -v Full path of the cgroup2\n"); - printf(" -h Display this help\n"); -} - -int main(int argc, char **argv) -{ - const char *pinned_file = NULL, *cg2 = NULL; - int create_array = 1; - int array_key = 0; - int array_fd = -1; - int cg2_fd = -1; - int ret = -1; - int opt; - - while ((opt = getopt(argc, argv, "F:U:v:")) != -1) { - switch (opt) { - /* General args */ - case 'F': - pinned_file = optarg; - break; - case 'U': - pinned_file = optarg; - create_array = 0; - break; - case 'v': - cg2 = optarg; - break; - default: - usage(); - goto out; - } - } - - if (!cg2 || !pinned_file) { - usage(); - goto out; - } - - cg2_fd = open(cg2, O_RDONLY); - if (cg2_fd < 0) { - fprintf(stderr, "open(%s,...): %s(%d)\n", - cg2, strerror(errno), errno); - goto out; - } - - if (create_array) { - array_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_ARRAY, NULL, - sizeof(uint32_t), sizeof(uint32_t), - 1, NULL); - if (array_fd < 0) { - fprintf(stderr, - "bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,...): %s(%d)\n", - strerror(errno), errno); - goto out; - } - } else { - array_fd = bpf_obj_get(pinned_file); - if (array_fd < 0) { - fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", - pinned_file, strerror(errno), errno); - goto out; - } - } - - ret = bpf_map_update_elem(array_fd, &array_key, &cg2_fd, 0); - if (ret) { - perror("bpf_map_update_elem"); - goto out; - } - - if (create_array) { - ret = bpf_obj_pin(array_fd, pinned_file); - if (ret) { - fprintf(stderr, "bpf_obj_pin(..., %s): %s(%d)\n", - pinned_file, strerror(errno), errno); - goto out; - } - } - -out: - if (array_fd != -1) - close(array_fd); - if (cg2_fd != -1) - close(cg2_fd); - return ret; -} diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c deleted file mode 100644 index 68ce69457afe..000000000000 --- a/samples/bpf/test_cgrp2_attach.c +++ /dev/null @@ -1,177 +0,0 @@ -/* eBPF example program: - * - * - Creates arraymap in kernel with 4 bytes keys and 8 byte values - * - * - Loads eBPF program - * - * The eBPF program accesses the map passed in to store two pieces of - * information. The number of invocations of the program, which maps - * to the number of packets received, is stored to key 0. Key 1 is - * incremented on each iteration by the number of bytes stored in - * the skb. - * - * - Attaches the new program to a cgroup using BPF_PROG_ATTACH - * - * - Every second, reads map[0] and map[1] to see how many bytes and - * packets were seen on any socket of tasks in the given cgroup. 
- */ - -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "bpf_insn.h" -#include "bpf_util.h" - -enum { - MAP_KEY_PACKETS, - MAP_KEY_BYTES, -}; - -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - -static int prog_load(int map_fd, int verdict) -{ - struct bpf_insn prog[] = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */ - - /* Count packets */ - BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ - BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */ - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0), - - /* Count bytes */ - BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ - BPF_LD_MAP_FD(BPF_REG_1, map_fd), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */ - - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0), - - BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - size_t insns_cnt = ARRAY_SIZE(prog); - LIBBPF_OPTS(bpf_prog_load_opts, opts, - .log_buf = bpf_log_buf, - .log_size = BPF_LOG_BUF_SIZE, - ); - - return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB, NULL, "GPL", - prog, insns_cnt, &opts); -} - -static int usage(const char *argv0) -{ - printf("Usage: %s [-d] [-D] \n", argv0); - printf(" -d Drop Traffic\n"); - printf(" -D Detach filter, and exit\n"); - return EXIT_FAILURE; -} - -static int attach_filter(int cg_fd, int type, int verdict) -{ - int prog_fd, map_fd, ret, key; - long long pkt_cnt, byte_cnt; - - map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, - sizeof(key), sizeof(byte_cnt), - 256, NULL); - if (map_fd < 0) { - printf("Failed to create map: '%s'\n", strerror(errno)); - return EXIT_FAILURE; - } - - prog_fd = prog_load(map_fd, verdict); - printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf); - - if (prog_fd < 0) { - printf("Failed to load prog: '%s'\n", strerror(errno)); - return EXIT_FAILURE; - } - - ret = bpf_prog_attach(prog_fd, cg_fd, type, 0); - if (ret < 0) { - printf("Failed to attach prog to cgroup: '%s'\n", - strerror(errno)); - return EXIT_FAILURE; - } - while (1) { - key = MAP_KEY_PACKETS; - assert(bpf_map_lookup_elem(map_fd, &key, &pkt_cnt) == 0); - - key = MAP_KEY_BYTES; - assert(bpf_map_lookup_elem(map_fd, &key, &byte_cnt) == 0); - - printf("cgroup received %lld packets, %lld bytes\n", - pkt_cnt, byte_cnt); - sleep(1); - } - - return EXIT_SUCCESS; -} - -int main(int argc, char **argv) -{ - int detach_only = 0, verdict = 1; - enum bpf_attach_type type; - int opt, cg_fd, ret; - - while ((opt = getopt(argc, argv, "Dd")) != -1) { - switch (opt) { - case 'd': - verdict = 0; - break; - case 'D': - detach_only = 1; - break; - default: - return usage(argv[0]); - } - } - - if (argc - optind < 2) - return usage(argv[0]); - - if (strcmp(argv[optind + 1], "ingress") == 0) - type = BPF_CGROUP_INET_INGRESS; - else if (strcmp(argv[optind + 1], 
"egress") == 0) - type = BPF_CGROUP_INET_EGRESS; - else - return usage(argv[0]); - - cg_fd = open(argv[optind], O_DIRECTORY | O_RDONLY); - if (cg_fd < 0) { - printf("Failed to open cgroup path: '%s'\n", strerror(errno)); - return EXIT_FAILURE; - } - - if (detach_only) { - ret = bpf_prog_detach(cg_fd, type); - printf("bpf_prog_detach() returned '%s' (%d)\n", - strerror(errno), errno); - } else - ret = attach_filter(cg_fd, type, verdict); - - return ret; -} diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c deleted file mode 100644 index a0811df888f4..000000000000 --- a/samples/bpf/test_cgrp2_sock.c +++ /dev/null @@ -1,294 +0,0 @@ -/* eBPF example program: - * - * - Loads eBPF program - * - * The eBPF program sets the sk_bound_dev_if index in new AF_INET{6} - * sockets opened by processes in the cgroup. - * - * - Attaches the new program to a cgroup using BPF_PROG_ATTACH - */ - -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bpf_insn.h" - -char bpf_log_buf[BPF_LOG_BUF_SIZE]; - -static int prog_load(__u32 idx, __u32 mark, __u32 prio) -{ - /* save pointer to context */ - struct bpf_insn prog_start[] = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - }; - struct bpf_insn prog_end[] = { - BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */ - BPF_EXIT_INSN(), - }; - - /* set sk_bound_dev_if on socket */ - struct bpf_insn prog_dev[] = { - BPF_MOV64_IMM(BPF_REG_3, idx), - BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, bound_dev_if)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, bound_dev_if)), - }; - - /* set mark on socket */ - struct bpf_insn prog_mark[] = { - /* get uid of process */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_current_uid_gid), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff), - - /* if uid is 0, use given mark, else use the uid as the mark */ - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, mark), - - /* set the mark on the new socket */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, mark)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, mark)), - }; - - /* set priority on socket */ - struct bpf_insn prog_prio[] = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_3, prio), - BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, priority)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, priority)), - }; - LIBBPF_OPTS(bpf_prog_load_opts, opts, - .log_buf = bpf_log_buf, - .log_size = BPF_LOG_BUF_SIZE, - ); - - struct bpf_insn *prog; - size_t insns_cnt; - void *p; - int ret; - - insns_cnt = sizeof(prog_start) + sizeof(prog_end); - if (idx) - insns_cnt += sizeof(prog_dev); - - if (mark) - insns_cnt += sizeof(prog_mark); - - if (prio) - insns_cnt += sizeof(prog_prio); - - p = prog = malloc(insns_cnt); - if (!prog) { - fprintf(stderr, "Failed to allocate memory for instructions\n"); - return EXIT_FAILURE; - } - - memcpy(p, prog_start, sizeof(prog_start)); - p += sizeof(prog_start); - - if (idx) { - memcpy(p, prog_dev, sizeof(prog_dev)); - p += sizeof(prog_dev); - } - - if (mark) { - memcpy(p, prog_mark, sizeof(prog_mark)); - p += sizeof(prog_mark); - } - - if (prio) { - memcpy(p, prog_prio, sizeof(prog_prio)); - p += sizeof(prog_prio); - } - - memcpy(p, prog_end, sizeof(prog_end)); - p += sizeof(prog_end); - - insns_cnt /= sizeof(struct bpf_insn); - - ret = 
bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", - prog, insns_cnt, &opts); - - free(prog); - - return ret; -} - -static int get_bind_to_device(int sd, char *name, size_t len) -{ - socklen_t optlen = len; - int rc; - - name[0] = '\0'; - rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen); - if (rc < 0) - perror("setsockopt(SO_BINDTODEVICE)"); - - return rc; -} - -static unsigned int get_somark(int sd) -{ - unsigned int mark = 0; - socklen_t optlen = sizeof(mark); - int rc; - - rc = getsockopt(sd, SOL_SOCKET, SO_MARK, &mark, &optlen); - if (rc < 0) - perror("getsockopt(SO_MARK)"); - - return mark; -} - -static unsigned int get_priority(int sd) -{ - unsigned int prio = 0; - socklen_t optlen = sizeof(prio); - int rc; - - rc = getsockopt(sd, SOL_SOCKET, SO_PRIORITY, &prio, &optlen); - if (rc < 0) - perror("getsockopt(SO_PRIORITY)"); - - return prio; -} - -static int show_sockopts(int family) -{ - unsigned int mark, prio; - char name[16]; - int sd; - - sd = socket(family, SOCK_DGRAM, 17); - if (sd < 0) { - perror("socket"); - return 1; - } - - if (get_bind_to_device(sd, name, sizeof(name)) < 0) - return 1; - - mark = get_somark(sd); - prio = get_priority(sd); - - close(sd); - - printf("sd %d: dev %s, mark %u, priority %u\n", sd, name, mark, prio); - - return 0; -} - -static int usage(const char *argv0) -{ - printf("Usage:\n"); - printf(" Attach a program\n"); - printf(" %s -b bind-to-dev -m mark -p prio cg-path\n", argv0); - printf("\n"); - printf(" Detach a program\n"); - printf(" %s -d cg-path\n", argv0); - printf("\n"); - printf(" Show inherited socket settings (mark, priority, and device)\n"); - printf(" %s [-6]\n", argv0); - return EXIT_FAILURE; -} - -int main(int argc, char **argv) -{ - __u32 idx = 0, mark = 0, prio = 0; - const char *cgrp_path = NULL; - int cg_fd, prog_fd, ret; - int family = PF_INET; - int do_attach = 1; - int rc; - - while ((rc = getopt(argc, argv, "db:m:p:6")) != -1) { - switch (rc) { - case 'd': - do_attach = 0; - break; - case 'b': - idx = if_nametoindex(optarg); - if (!idx) { - idx = strtoumax(optarg, NULL, 0); - if (!idx) { - printf("Invalid device name\n"); - return EXIT_FAILURE; - } - } - break; - case 'm': - mark = strtoumax(optarg, NULL, 0); - break; - case 'p': - prio = strtoumax(optarg, NULL, 0); - break; - case '6': - family = PF_INET6; - break; - default: - return usage(argv[0]); - } - } - - if (optind == argc) - return show_sockopts(family); - - cgrp_path = argv[optind]; - if (!cgrp_path) { - fprintf(stderr, "cgroup path not given\n"); - return EXIT_FAILURE; - } - - if (do_attach && !idx && !mark && !prio) { - fprintf(stderr, - "One of device, mark or priority must be given\n"); - return EXIT_FAILURE; - } - - cg_fd = open(cgrp_path, O_DIRECTORY | O_RDONLY); - if (cg_fd < 0) { - printf("Failed to open cgroup path: '%s'\n", strerror(errno)); - return EXIT_FAILURE; - } - - if (do_attach) { - prog_fd = prog_load(idx, mark, prio); - if (prog_fd < 0) { - printf("Failed to load prog: '%s'\n", strerror(errno)); - printf("Output from kernel verifier:\n%s\n-------\n", - bpf_log_buf); - return EXIT_FAILURE; - } - - ret = bpf_prog_attach(prog_fd, cg_fd, - BPF_CGROUP_INET_SOCK_CREATE, 0); - if (ret < 0) { - printf("Failed to attach prog to cgroup: '%s'\n", - strerror(errno)); - return EXIT_FAILURE; - } - } else { - ret = bpf_prog_detach(cg_fd, BPF_CGROUP_INET_SOCK_CREATE); - if (ret < 0) { - printf("Failed to detach prog from cgroup: '%s'\n", - strerror(errno)); - return EXIT_FAILURE; - } - } - - close(cg_fd); - return EXIT_SUCCESS; -} diff --git 
a/samples/bpf/test_cgrp2_sock.sh b/samples/bpf/test_cgrp2_sock.sh deleted file mode 100755 index 36bd7cb46f06..000000000000 --- a/samples/bpf/test_cgrp2_sock.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -# Test various socket options that can be set by attaching programs to cgroups. - -MY_DIR=$(dirname $0) -TEST=$MY_DIR/test_cgrp2_sock -CGRP_MNT="/tmp/cgroupv2-test_cgrp2_sock" - -################################################################################ -# -print_result() -{ - local rc=$1 - local status=" OK " - - [ $rc -ne 0 ] && status="FAIL" - - printf "%-50s [%4s]\n" "$2" "$status" -} - -check_sock() -{ - out=$($TEST) - echo $out | grep -q "$1" - if [ $? -ne 0 ]; then - print_result 1 "IPv4: $2" - echo " expected: $1" - echo " have: $out" - rc=1 - else - print_result 0 "IPv4: $2" - fi -} - -check_sock6() -{ - out=$($TEST -6) - echo $out | grep -q "$1" - if [ $? -ne 0 ]; then - print_result 1 "IPv6: $2" - echo " expected: $1" - echo " have: $out" - rc=1 - else - print_result 0 "IPv6: $2" - fi -} - -################################################################################ -# - -cleanup() -{ - echo $$ >> ${CGRP_MNT}/cgroup.procs - rmdir ${CGRP_MNT}/sockopts -} - -cleanup_and_exit() -{ - local rc=$1 - local msg="$2" - - [ -n "$msg" ] && echo "ERROR: $msg" - - $TEST -d ${CGRP_MNT}/sockopts - ip li del cgrp2_sock - umount ${CGRP_MNT} - - exit $rc -} - - -################################################################################ -# main - -rc=0 - -ip li add cgrp2_sock type dummy 2>/dev/null - -set -e -mkdir -p ${CGRP_MNT} -mount -t cgroup2 none ${CGRP_MNT} -set +e - - -# make sure we have a known start point -cleanup 2>/dev/null - -mkdir -p ${CGRP_MNT}/sockopts -[ $? -ne 0 ] && cleanup_and_exit 1 "Failed to create cgroup hierarchy" - - -# set pid into cgroup -echo $$ > ${CGRP_MNT}/sockopts/cgroup.procs - -# no bpf program attached, so socket should show no settings -check_sock "dev , mark 0, priority 0" "No programs attached" -check_sock6 "dev , mark 0, priority 0" "No programs attached" - -# verify device is set -# -$TEST -b cgrp2_sock ${CGRP_MNT}/sockopts -if [ $? -ne 0 ]; then - cleanup_and_exit 1 "Failed to install program to set device" -fi -check_sock "dev cgrp2_sock, mark 0, priority 0" "Device set" -check_sock6 "dev cgrp2_sock, mark 0, priority 0" "Device set" - -# verify mark is set -# -$TEST -m 666 ${CGRP_MNT}/sockopts -if [ $? -ne 0 ]; then - cleanup_and_exit 1 "Failed to install program to set mark" -fi -check_sock "dev , mark 666, priority 0" "Mark set" -check_sock6 "dev , mark 666, priority 0" "Mark set" - -# verify priority is set -# -$TEST -p 123 ${CGRP_MNT}/sockopts -if [ $? -ne 0 ]; then - cleanup_and_exit 1 "Failed to install program to set priority" -fi -check_sock "dev , mark 0, priority 123" "Priority set" -check_sock6 "dev , mark 0, priority 123" "Priority set" - -# all 3 at once -# -$TEST -b cgrp2_sock -m 666 -p 123 ${CGRP_MNT}/sockopts -if [ $? 
-ne 0 ]; then - cleanup_and_exit 1 "Failed to install program to set device, mark and priority" -fi -check_sock "dev cgrp2_sock, mark 666, priority 123" "Priority set" -check_sock6 "dev cgrp2_sock, mark 666, priority 123" "Priority set" - -cleanup_and_exit $rc diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c deleted file mode 100644 index e7060aaa2f5a..000000000000 --- a/samples/bpf/test_cgrp2_sock2.c +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* eBPF example program: - * - * - Loads eBPF program - * - * The eBPF program loads a filter from file and attaches the - * program to a cgroup using BPF_PROG_ATTACH - */ - -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bpf_insn.h" - -static int usage(const char *argv0) -{ - printf("Usage: %s cg-path filter-path [filter-id]\n", argv0); - return EXIT_FAILURE; -} - -int main(int argc, char **argv) -{ - int cg_fd, err, ret = EXIT_FAILURE, filter_id = 0, prog_cnt = 0; - const char *link_pin_path = "/sys/fs/bpf/test_cgrp2_sock2"; - struct bpf_link *link = NULL; - struct bpf_program *progs[2]; - struct bpf_program *prog; - struct bpf_object *obj; - - if (argc < 3) - return usage(argv[0]); - - if (argc > 3) - filter_id = atoi(argv[3]); - - cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY); - if (cg_fd < 0) { - printf("Failed to open cgroup path: '%s'\n", strerror(errno)); - return ret; - } - - obj = bpf_object__open_file(argv[2], NULL); - if (libbpf_get_error(obj)) { - printf("ERROR: opening BPF object file failed\n"); - return ret; - } - - bpf_object__for_each_program(prog, obj) { - progs[prog_cnt] = prog; - prog_cnt++; - } - - if (filter_id >= prog_cnt) { - printf("Invalid program id; program not found in file\n"); - goto cleanup; - } - - /* load BPF program */ - if (bpf_object__load(obj)) { - printf("ERROR: loading BPF object file failed\n"); - goto cleanup; - } - - link = bpf_program__attach_cgroup(progs[filter_id], cg_fd); - if (libbpf_get_error(link)) { - printf("ERROR: bpf_program__attach failed\n"); - link = NULL; - goto cleanup; - } - - err = bpf_link__pin(link, link_pin_path); - if (err < 0) { - printf("ERROR: bpf_link__pin failed: %d\n", err); - goto cleanup; - } - - ret = EXIT_SUCCESS; - -cleanup: - bpf_link__destroy(link); - bpf_object__close(obj); - return ret; -} diff --git a/samples/bpf/test_cgrp2_sock2.sh b/samples/bpf/test_cgrp2_sock2.sh deleted file mode 100755 index 82acff93d739..000000000000 --- a/samples/bpf/test_cgrp2_sock2.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -BPFFS=/sys/fs/bpf -MY_DIR=$(dirname $0) -TEST=$MY_DIR/test_cgrp2_sock2 -LINK_PIN=$BPFFS/test_cgrp2_sock2 -BPF_PROG=$MY_DIR/sock_flags.bpf.o - -function config_device { - ip netns add at_ns0 - ip link add veth0 type veth peer name veth0b - ip link set veth0 netns at_ns0 - ip netns exec at_ns0 sysctl -q net.ipv6.conf.veth0.disable_ipv6=0 - ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0 - ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad - ip netns exec at_ns0 ip link set dev veth0 up - sysctl -q net.ipv6.conf.veth0b.disable_ipv6=0 - ip addr add 172.16.1.101/24 dev veth0b - ip addr add 2401:db00::2/64 dev veth0b nodad - ip link set veth0b up -} - -function config_cgroup { - rm -rf /tmp/cgroupv2 - mkdir -p /tmp/cgroupv2 - mount -t cgroup2 none /tmp/cgroupv2 - mkdir -p /tmp/cgroupv2/foo - echo $$ >> /tmp/cgroupv2/foo/cgroup.procs -} - -function 
config_bpffs { - if mount | grep $BPFFS > /dev/null; then - echo "bpffs already mounted" - else - echo "bpffs not mounted. Mounting..." - mount -t bpf none $BPFFS - fi -} - -function attach_bpf { - $TEST /tmp/cgroupv2/foo $BPF_PROG $1 - [ $? -ne 0 ] && exit 1 -} - -function cleanup { - rm -rf $LINK_PIN - ip link del veth0b - ip netns delete at_ns0 - umount /tmp/cgroupv2 - rm -rf /tmp/cgroupv2 -} - -cleanup 2>/dev/null - -set -e -config_device -config_cgroup -config_bpffs -set +e - -# -# Test 1 - fail ping6 -# -attach_bpf 0 -ping -c1 -w1 172.16.1.100 -if [ $? -ne 0 ]; then - echo "ping failed when it should succeed" - cleanup - exit 1 -fi - -ping6 -c1 -w1 2401:db00::1 -if [ $? -eq 0 ]; then - echo "ping6 succeeded when it should not" - cleanup - exit 1 -fi - -rm -rf $LINK_PIN -sleep 1 # Wait for link detach - -# -# Test 2 - fail ping -# -attach_bpf 1 -ping6 -c1 -w1 2401:db00::1 -if [ $? -ne 0 ]; then - echo "ping6 failed when it should succeed" - cleanup - exit 1 -fi - -ping -c1 -w1 172.16.1.100 -if [ $? -eq 0 ]; then - echo "ping succeeded when it should not" - cleanup - exit 1 -fi - -cleanup -echo -echo "*** PASS ***" diff --git a/samples/bpf/test_cgrp2_tc.bpf.c b/samples/bpf/test_cgrp2_tc.bpf.c deleted file mode 100644 index c7d2291d676f..000000000000 --- a/samples/bpf/test_cgrp2_tc.bpf.c +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2016 Facebook - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ -#define KBUILD_MODNAME "foo" -#include "vmlinux.h" -#include "net_shared.h" -#include - -/* copy of 'struct ethhdr' without __packed */ -struct eth_hdr { - unsigned char h_dest[ETH_ALEN]; - unsigned char h_source[ETH_ALEN]; - unsigned short h_proto; -}; - -struct { - __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY); - __type(key, u32); - __type(value, u32); - __uint(pinning, LIBBPF_PIN_BY_NAME); - __uint(max_entries, 1); -} test_cgrp2_array_pin SEC(".maps"); - -SEC("filter") -int handle_egress(struct __sk_buff *skb) -{ - void *data = (void *)(long)skb->data; - struct eth_hdr *eth = data; - struct ipv6hdr *ip6h = data + sizeof(*eth); - void *data_end = (void *)(long)skb->data_end; - char dont_care_msg[] = "dont care %04x %d\n"; - char pass_msg[] = "pass\n"; - char reject_msg[] = "reject\n"; - - /* single length check */ - if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) - return TC_ACT_OK; - - if (eth->h_proto != bpf_htons(ETH_P_IPV6) || - ip6h->nexthdr != IPPROTO_ICMPV6) { - bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg), - eth->h_proto, ip6h->nexthdr); - return TC_ACT_OK; - } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { - bpf_trace_printk(pass_msg, sizeof(pass_msg)); - return TC_ACT_OK; - } else { - bpf_trace_printk(reject_msg, sizeof(reject_msg)); - return TC_ACT_SHOT; - } -} - -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_cgrp2_tc.sh b/samples/bpf/test_cgrp2_tc.sh deleted file mode 100755 index 38e8dbc9d16e..000000000000 --- a/samples/bpf/test_cgrp2_tc.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -MY_DIR=$(dirname $0) -# Details on the bpf prog -BPF_CGRP2_ARRAY_NAME='test_cgrp2_array_pin' -BPF_PROG="$MY_DIR/test_cgrp2_tc.bpf.o" -BPF_SECTION='filter' - -[ -z "$TC" ] && TC='tc' -[ -z "$IP" ] && IP='ip' - -# Names of the veth interface, net namespace...etc. 
-HOST_IFC='ve' -NS_IFC='vens' -NS='ns' - -find_mnt() { - cat /proc/mounts | \ - awk '{ if ($3 == "'$1'" && mnt == "") { mnt = $2 }} END { print mnt }' -} - -# Init cgroup2 vars -init_cgrp2_vars() { - CGRP2_ROOT=$(find_mnt cgroup2) - if [ -z "$CGRP2_ROOT" ] - then - CGRP2_ROOT='/mnt/cgroup2' - MOUNT_CGRP2="yes" - fi - CGRP2_TC="$CGRP2_ROOT/tc" - CGRP2_TC_LEAF="$CGRP2_TC/leaf" -} - -# Init bpf fs vars -init_bpf_fs_vars() { - local bpf_fs_root=$(find_mnt bpf) - [ -n "$bpf_fs_root" ] || return -1 - BPF_FS_TC_SHARE="$bpf_fs_root/tc/globals" -} - -setup_cgrp2() { - case $1 in - start) - if [ "$MOUNT_CGRP2" == 'yes' ] - then - [ -d $CGRP2_ROOT ] || mkdir -p $CGRP2_ROOT - mount -t cgroup2 none $CGRP2_ROOT || return $? - fi - mkdir -p $CGRP2_TC_LEAF - ;; - *) - rmdir $CGRP2_TC_LEAF && rmdir $CGRP2_TC - [ "$MOUNT_CGRP2" == 'yes' ] && umount $CGRP2_ROOT - ;; - esac -} - -setup_bpf_cgrp2_array() { - local bpf_cgrp2_array="$BPF_FS_TC_SHARE/$BPF_CGRP2_ARRAY_NAME" - case $1 in - start) - $MY_DIR/test_cgrp2_array_pin -U $bpf_cgrp2_array -v $CGRP2_TC - ;; - *) - [ -d "$BPF_FS_TC_SHARE" ] && rm -f $bpf_cgrp2_array - ;; - esac -} - -setup_net() { - case $1 in - start) - $IP link add $HOST_IFC type veth peer name $NS_IFC || return $? - $IP link set dev $HOST_IFC up || return $? - sysctl -q net.ipv6.conf.$HOST_IFC.disable_ipv6=0 - sysctl -q net.ipv6.conf.$HOST_IFC.accept_dad=0 - - $IP netns add $NS || return $? - $IP link set dev $NS_IFC netns $NS || return $? - $IP -n $NS link set dev $NS_IFC up || return $? - $IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.disable_ipv6=0 - $IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.accept_dad=0 - $TC qdisc add dev $HOST_IFC clsact || return $? - $TC filter add dev $HOST_IFC egress bpf da obj $BPF_PROG sec $BPF_SECTION || return $? - ;; - *) - $IP netns del $NS - $IP link del $HOST_IFC - ;; - esac -} - -run_in_cgrp() { - # Fork another bash and move it under the specified cgroup. - # It makes the cgroup cleanup easier at the end of the test. - cmd='echo $$ > ' - cmd="$cmd $1/cgroup.procs; exec $2" - bash -c "$cmd" -} - -do_test() { - run_in_cgrp $CGRP2_TC_LEAF "ping -6 -c3 ff02::1%$HOST_IFC >& /dev/null" - local dropped=$($TC -s qdisc show dev $HOST_IFC | tail -3 | \ - awk '/drop/{print substr($7, 0, index($7, ",")-1)}') - if [[ $dropped -eq 0 ]] - then - echo "FAIL" - return 1 - else - echo "Successfully filtered $dropped packets" - return 0 - fi -} - -do_exit() { - if [ "$DEBUG" == "yes" ] && [ "$MODE" != 'cleanuponly' ] - then - echo "------ DEBUG ------" - echo "mount: "; mount | grep -E '(cgroup2|bpf)'; echo - echo "$CGRP2_TC_LEAF: "; ls -l $CGRP2_TC_LEAF; echo - if [ -d "$BPF_FS_TC_SHARE" ] - then - echo "$BPF_FS_TC_SHARE: "; ls -l $BPF_FS_TC_SHARE; echo - fi - echo "Host net:" - $IP netns - $IP link show dev $HOST_IFC - $IP -6 a show dev $HOST_IFC - $TC -s qdisc show dev $HOST_IFC - echo - echo "$NS net:" - $IP -n $NS link show dev $NS_IFC - $IP -n $NS -6 link show dev $NS_IFC - echo "------ DEBUG ------" - echo - fi - - if [ "$MODE" != 'nocleanup' ] - then - setup_net stop - setup_bpf_cgrp2_array stop - setup_cgrp2 stop - fi -} - -init_cgrp2_vars -init_bpf_fs_vars - -while [[ $# -ge 1 ]] -do - a="$1" - case $a in - debug) - DEBUG='yes' - shift 1 - ;; - cleanup-only) - MODE='cleanuponly' - shift 1 - ;; - no-cleanup) - MODE='nocleanup' - shift 1 - ;; - *) - echo "test_cgrp2_tc [debug] [cleanup-only | no-cleanup]" - echo " debug: Print cgrp and network setup details at the end of the test" - echo " cleanup-only: Try to cleanup things from last test. 
No test will be run" - echo " no-cleanup: Run the test but don't do cleanup at the end" - echo "[Note: If no arg is given, it will run the test and do cleanup at the end]" - echo - exit -1 - ;; - esac -done - -trap do_exit 0 - -[ "$MODE" == 'cleanuponly' ] && exit - -setup_cgrp2 start || exit $? -setup_net start || exit $? -init_bpf_fs_vars || exit $? -setup_bpf_cgrp2_array start || exit $? -do_test -echo diff --git a/samples/bpf/test_current_task_under_cgroup.bpf.c b/samples/bpf/test_current_task_under_cgroup.bpf.c deleted file mode 100644 index 58b9cf7ed659..000000000000 --- a/samples/bpf/test_current_task_under_cgroup.bpf.c +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright (c) 2016 Sargun Dhillon - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ - -#include "vmlinux.h" -#include -#include -#include -#include - -struct { - __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY); - __uint(key_size, sizeof(u32)); - __uint(value_size, sizeof(u32)); - __uint(max_entries, 1); -} cgroup_map SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __type(key, u32); - __type(value, u64); - __uint(max_entries, 1); -} perf_map SEC(".maps"); - -/* Writes the last PID that called sync to a map at index 0 */ -SEC("ksyscall/sync") -int BPF_KSYSCALL(bpf_prog1) -{ - u64 pid = bpf_get_current_pid_tgid(); - int idx = 0; - - if (!bpf_current_task_under_cgroup(&cgroup_map, 0)) - return 0; - - bpf_map_update_elem(&perf_map, &idx, &pid, BPF_ANY); - return 0; -} - -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c deleted file mode 100644 index 9726ed2a8a8b..000000000000 --- a/samples/bpf/test_current_task_under_cgroup_user.c +++ /dev/null @@ -1,115 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016 Sargun Dhillon - */ - -#define _GNU_SOURCE -#include -#include -#include -#include -#include "cgroup_helpers.h" - -#define CGROUP_PATH "/my-cgroup" - -int main(int argc, char **argv) -{ - pid_t remote_pid, local_pid = getpid(); - int cg2 = -1, idx = 0, rc = 1; - struct bpf_link *link = NULL; - struct bpf_program *prog; - struct bpf_object *obj; - char filename[256]; - int map_fd[2]; - - snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) { - fprintf(stderr, "ERROR: opening BPF object file failed\n"); - return 0; - } - - prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); - if (!prog) { - printf("finding a prog in obj file failed\n"); - goto cleanup; - } - - /* load BPF program */ - if (bpf_object__load(obj)) { - fprintf(stderr, "ERROR: loading BPF object file failed\n"); - goto cleanup; - } - - map_fd[0] = bpf_object__find_map_fd_by_name(obj, "cgroup_map"); - map_fd[1] = bpf_object__find_map_fd_by_name(obj, "perf_map"); - if (map_fd[0] < 0 || map_fd[1] < 0) { - fprintf(stderr, "ERROR: finding a map in obj file failed\n"); - goto cleanup; - } - - link = bpf_program__attach(prog); - if (libbpf_get_error(link)) { - fprintf(stderr, "ERROR: bpf_program__attach failed\n"); - link = NULL; - goto cleanup; - } - - if (setup_cgroup_environment()) - goto err; - - cg2 = create_and_get_cgroup(CGROUP_PATH); - - if (cg2 < 0) - goto err; - - if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { - log_err("Adding target cgroup 
to map"); - goto err; - } - - if (join_cgroup(CGROUP_PATH)) - goto err; - - /* - * The installed helper program catched the sync call, and should - * write it to the map. - */ - - sync(); - bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid); - - if (local_pid != remote_pid) { - fprintf(stderr, - "BPF Helper didn't write correct PID to map, but: %d\n", - remote_pid); - goto err; - } - - /* Verify the negative scenario; leave the cgroup */ - if (join_cgroup("/")) - goto err; - - remote_pid = 0; - bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY); - - sync(); - bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid); - - if (local_pid == remote_pid) { - fprintf(stderr, "BPF cgroup negative test did not work\n"); - goto err; - } - - rc = 0; - -err: - if (cg2 != -1) - close(cg2); - - cleanup_cgroup_environment(); - -cleanup: - bpf_link__destroy(link); - bpf_object__close(obj); - return rc; -} diff --git a/samples/bpf/test_overhead_kprobe.bpf.c b/samples/bpf/test_overhead_kprobe.bpf.c deleted file mode 100644 index 668cf5259c60..000000000000 --- a/samples/bpf/test_overhead_kprobe.bpf.c +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2016 Facebook - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ -#include "vmlinux.h" -#include -#include -#include -#include - -SEC("kprobe/__set_task_comm") -int prog(struct pt_regs *ctx) -{ - struct signal_struct *signal; - struct task_struct *tsk; - char oldcomm[TASK_COMM_LEN] = {}; - char newcomm[TASK_COMM_LEN] = {}; - u16 oom_score_adj; - u32 pid; - - tsk = (void *)PT_REGS_PARM1_CORE(ctx); - - pid = BPF_CORE_READ(tsk, pid); - bpf_core_read_str(oldcomm, sizeof(oldcomm), &tsk->comm); - bpf_core_read_str(newcomm, sizeof(newcomm), - (void *)PT_REGS_PARM2(ctx)); - signal = BPF_CORE_READ(tsk, signal); - oom_score_adj = BPF_CORE_READ(signal, oom_score_adj); - return 0; -} - -SEC("kprobe/fib_table_lookup") -int prog2(struct pt_regs *ctx) -{ - return 0; -} - -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_overhead_raw_tp.bpf.c b/samples/bpf/test_overhead_raw_tp.bpf.c deleted file mode 100644 index 6af39fe3f8dd..000000000000 --- a/samples/bpf/test_overhead_raw_tp.bpf.c +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2018 Facebook */ -#include "vmlinux.h" -#include - -SEC("raw_tracepoint/task_rename") -int prog(struct bpf_raw_tracepoint_args *ctx) -{ - return 0; -} - -SEC("raw_tracepoint/fib_table_lookup") -int prog2(struct bpf_raw_tracepoint_args *ctx) -{ - return 0; -} -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_overhead_tp.bpf.c b/samples/bpf/test_overhead_tp.bpf.c deleted file mode 100644 index 5dc08b587978..000000000000 --- a/samples/bpf/test_overhead_tp.bpf.c +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2016 Facebook - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. 
- */ -#include "vmlinux.h" -#include - -/* from /sys/kernel/tracing/events/task/task_rename/format */ -SEC("tracepoint/task/task_rename") -int prog(struct trace_event_raw_task_rename *ctx) -{ - return 0; -} - -/* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */ -SEC("tracepoint/fib/fib_table_lookup") -int prog2(struct trace_event_raw_fib_table_lookup *ctx) -{ - return 0; -} -char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c deleted file mode 100644 index dbd86f7b1473..000000000000 --- a/samples/bpf/test_overhead_user.c +++ /dev/null @@ -1,225 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016 Facebook - */ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MAX_CNT 1000000 -#define DUMMY_IP "127.0.0.1" -#define DUMMY_PORT 80 - -static struct bpf_link *links[2]; -static struct bpf_object *obj; -static int cnt; - -static __u64 time_get_ns(void) -{ - struct timespec ts; - - clock_gettime(CLOCK_MONOTONIC, &ts); - return ts.tv_sec * 1000000000ull + ts.tv_nsec; -} - -static void test_task_rename(int cpu) -{ - char buf[] = "test\n"; - __u64 start_time; - int i, fd; - - fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); - if (fd < 0) { - printf("couldn't open /proc\n"); - exit(1); - } - start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) { - if (write(fd, buf, sizeof(buf)) < 0) { - printf("task rename failed: %s\n", strerror(errno)); - close(fd); - return; - } - } - printf("task_rename:%d: %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); - close(fd); -} - -static void test_fib_table_lookup(int cpu) -{ - struct sockaddr_in addr; - char buf[] = "test\n"; - __u64 start_time; - int i, fd; - - fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); - if (fd < 0) { - printf("couldn't open socket\n"); - exit(1); - } - memset((char *)&addr, 0, sizeof(addr)); - addr.sin_addr.s_addr = inet_addr(DUMMY_IP); - addr.sin_port = htons(DUMMY_PORT); - addr.sin_family = AF_INET; - start_time = time_get_ns(); - for (i = 0; i < MAX_CNT; i++) { - if (sendto(fd, buf, strlen(buf), 0, - (struct sockaddr *)&addr, sizeof(addr)) < 0) { - printf("failed to start ping: %s\n", strerror(errno)); - close(fd); - return; - } - } - printf("fib_table_lookup:%d: %lld events per sec\n", - cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); - close(fd); -} - -static void loop(int cpu, int flags) -{ - cpu_set_t cpuset; - - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); - sched_setaffinity(0, sizeof(cpuset), &cpuset); - - if (flags & 1) - test_task_rename(cpu); - if (flags & 2) - test_fib_table_lookup(cpu); -} - -static void run_perf_test(int tasks, int flags) -{ - pid_t pid[tasks]; - int i; - - for (i = 0; i < tasks; i++) { - pid[i] = fork(); - if (pid[i] == 0) { - loop(i, flags); - exit(0); - } else if (pid[i] == -1) { - printf("couldn't spawn #%d process\n", i); - exit(1); - } - } - for (i = 0; i < tasks; i++) { - int status; - - assert(waitpid(pid[i], &status, 0) == pid[i]); - assert(status == 0); - } -} - -static int load_progs(char *filename) -{ - struct bpf_program *prog; - int err = 0; - - obj = bpf_object__open_file(filename, NULL); - err = libbpf_get_error(obj); - if (err < 0) { - fprintf(stderr, "ERROR: opening BPF object file failed\n"); - return err; - } - - /* load BPF program */ - err = bpf_object__load(obj); - if (err < 
0) { - fprintf(stderr, "ERROR: loading BPF object file failed\n"); - return err; - } - - bpf_object__for_each_program(prog, obj) { - links[cnt] = bpf_program__attach(prog); - err = libbpf_get_error(links[cnt]); - if (err < 0) { - fprintf(stderr, "ERROR: bpf_program__attach failed\n"); - links[cnt] = NULL; - return err; - } - cnt++; - } - - return err; -} - -static void unload_progs(void) -{ - while (cnt) - bpf_link__destroy(links[--cnt]); - - bpf_object__close(obj); -} - -int main(int argc, char **argv) -{ - int num_cpu = sysconf(_SC_NPROCESSORS_ONLN); - int test_flags = ~0; - char filename[256]; - int err = 0; - - - if (argc > 1) - test_flags = atoi(argv[1]) ? : test_flags; - if (argc > 2) - num_cpu = atoi(argv[2]) ? : num_cpu; - - if (test_flags & 0x3) { - printf("BASE\n"); - run_perf_test(num_cpu, test_flags); - } - - if (test_flags & 0xC) { - snprintf(filename, sizeof(filename), - "%s_kprobe.bpf.o", argv[0]); - - printf("w/KPROBE\n"); - err = load_progs(filename); - if (!err) - run_perf_test(num_cpu, test_flags >> 2); - - unload_progs(); - } - - if (test_flags & 0x30) { - snprintf(filename, sizeof(filename), - "%s_tp.bpf.o", argv[0]); - printf("w/TRACEPOINT\n"); - err = load_progs(filename); - if (!err) - run_perf_test(num_cpu, test_flags >> 4); - - unload_progs(); - } - - if (test_flags & 0xC0) { - snprintf(filename, sizeof(filename), - "%s_raw_tp.bpf.o", argv[0]); - printf("w/RAW_TRACEPOINT\n"); - err = load_progs(filename); - if (!err) - run_perf_test(num_cpu, test_flags >> 6); - - unload_progs(); - } - - return err; -} diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh deleted file mode 100755 index 35db26f736b9..000000000000 --- a/samples/bpf/test_override_return.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -rm -r tmpmnt -rm -f testfile.img -dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1 -DEVICE=$(losetup --show -f testfile.img) -mkfs.btrfs -f $DEVICE -mkdir tmpmnt -./tracex7 $DEVICE -if [ $? -eq 0 ] -then - echo "SUCCESS!" -else - echo "FAILED!" -fi -losetup -d $DEVICE diff --git a/samples/bpf/test_probe_write_user.bpf.c b/samples/bpf/test_probe_write_user.bpf.c deleted file mode 100644 index a4f3798b7fb0..000000000000 --- a/samples/bpf/test_probe_write_user.bpf.c +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2016 Sargun Dhillon - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ -#include "vmlinux.h" -#include -#include -#include -#include -#include - -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __type(key, struct sockaddr_in); - __type(value, struct sockaddr_in); - __uint(max_entries, 256); -} dnat_map SEC(".maps"); - -/* kprobe is NOT a stable ABI - * kernel functions can be removed, renamed or completely change semantics. - * Number of arguments and their positions can change, etc. - * In such case this bpf+kprobe example will no longer be meaningful - * - * This example sits on a syscall, and the syscall ABI is relatively stable - * of course, across platforms, and over time, the ABI may change. 
- */ -SEC("ksyscall/connect") -int BPF_KSYSCALL(bpf_prog1, int fd, struct sockaddr_in *uservaddr, - int addrlen) -{ - struct sockaddr_in new_addr, orig_addr = {}; - struct sockaddr_in *mapped_addr; - - if (addrlen > sizeof(orig_addr)) - return 0; - - if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), uservaddr) != 0) - return 0; - - mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr); - if (mapped_addr != NULL) { - memcpy(&new_addr, mapped_addr, sizeof(new_addr)); - bpf_probe_write_user(uservaddr, &new_addr, - sizeof(new_addr)); - } - return 0; -} - -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/test_probe_write_user_user.c b/samples/bpf/test_probe_write_user_user.c deleted file mode 100644 index 2a539aec4116..000000000000 --- a/samples/bpf/test_probe_write_user_user.c +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include - -int main(int ac, char **argv) -{ - struct sockaddr_in *serv_addr_in, *mapped_addr_in, *tmp_addr_in; - struct sockaddr serv_addr, mapped_addr, tmp_addr; - int serverfd, serverconnfd, clientfd, map_fd; - struct bpf_link *link = NULL; - struct bpf_program *prog; - struct bpf_object *obj; - socklen_t sockaddr_len; - char filename[256]; - char *ip; - - serv_addr_in = (struct sockaddr_in *)&serv_addr; - mapped_addr_in = (struct sockaddr_in *)&mapped_addr; - tmp_addr_in = (struct sockaddr_in *)&tmp_addr; - - snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) { - fprintf(stderr, "ERROR: opening BPF object file failed\n"); - return 0; - } - - prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); - if (libbpf_get_error(prog)) { - fprintf(stderr, "ERROR: finding a prog in obj file failed\n"); - goto cleanup; - } - - /* load BPF program */ - if (bpf_object__load(obj)) { - fprintf(stderr, "ERROR: loading BPF object file failed\n"); - goto cleanup; - } - - map_fd = bpf_object__find_map_fd_by_name(obj, "dnat_map"); - if (map_fd < 0) { - fprintf(stderr, "ERROR: finding a map in obj file failed\n"); - goto cleanup; - } - - link = bpf_program__attach(prog); - if (libbpf_get_error(link)) { - fprintf(stderr, "ERROR: bpf_program__attach failed\n"); - link = NULL; - goto cleanup; - } - - assert((serverfd = socket(AF_INET, SOCK_STREAM, 0)) > 0); - assert((clientfd = socket(AF_INET, SOCK_STREAM, 0)) > 0); - - /* Bind server to ephemeral port on lo */ - memset(&serv_addr, 0, sizeof(serv_addr)); - serv_addr_in->sin_family = AF_INET; - serv_addr_in->sin_port = 0; - serv_addr_in->sin_addr.s_addr = htonl(INADDR_LOOPBACK); - - assert(bind(serverfd, &serv_addr, sizeof(serv_addr)) == 0); - - sockaddr_len = sizeof(serv_addr); - assert(getsockname(serverfd, &serv_addr, &sockaddr_len) == 0); - ip = inet_ntoa(serv_addr_in->sin_addr); - printf("Server bound to: %s:%d\n", ip, ntohs(serv_addr_in->sin_port)); - - memset(&mapped_addr, 0, sizeof(mapped_addr)); - mapped_addr_in->sin_family = AF_INET; - mapped_addr_in->sin_port = htons(5555); - mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255"); - - assert(!bpf_map_update_elem(map_fd, &mapped_addr, &serv_addr, BPF_ANY)); - - assert(listen(serverfd, 5) == 0); - - ip = inet_ntoa(mapped_addr_in->sin_addr); - printf("Client connecting to: %s:%d\n", - ip, ntohs(mapped_addr_in->sin_port)); - assert(connect(clientfd, &mapped_addr, sizeof(mapped_addr)) == 0); - - sockaddr_len = sizeof(tmp_addr); - ip 
= inet_ntoa(tmp_addr_in->sin_addr); - assert((serverconnfd = accept(serverfd, &tmp_addr, &sockaddr_len)) > 0); - printf("Server received connection from: %s:%d\n", - ip, ntohs(tmp_addr_in->sin_port)); - - sockaddr_len = sizeof(tmp_addr); - assert(getpeername(clientfd, &tmp_addr, &sockaddr_len) == 0); - ip = inet_ntoa(tmp_addr_in->sin_addr); - printf("Client's peer address: %s:%d\n", - ip, ntohs(tmp_addr_in->sin_port)); - - /* Is the server's getsockname = the socket getpeername */ - assert(memcmp(&serv_addr, &tmp_addr, sizeof(struct sockaddr_in)) == 0); - -cleanup: - bpf_link__destroy(link); - bpf_object__close(obj); - return 0; -} diff --git a/samples/bpf/tracex7.bpf.c b/samples/bpf/tracex7.bpf.c deleted file mode 100644 index ab8d6704a5a4..000000000000 --- a/samples/bpf/tracex7.bpf.c +++ /dev/null @@ -1,15 +0,0 @@ -#include "vmlinux.h" -#include -#include - -SEC("kprobe/open_ctree") -int bpf_prog1(struct pt_regs *ctx) -{ - unsigned long rc = -12; - - bpf_override_return(ctx, rc); - return 0; -} - -char _license[] SEC("license") = "GPL"; -u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c deleted file mode 100644 index b10b5e03a226..000000000000 --- a/samples/bpf/tracex7_user.c +++ /dev/null @@ -1,56 +0,0 @@ -#define _GNU_SOURCE - -#include -#include -#include - -int main(int argc, char **argv) -{ - struct bpf_link *link = NULL; - struct bpf_program *prog; - struct bpf_object *obj; - char filename[256]; - char command[256]; - int ret = 0; - FILE *f; - - if (!argv[1]) { - fprintf(stderr, "ERROR: Run with the btrfs device argument!\n"); - return 0; - } - - snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); - obj = bpf_object__open_file(filename, NULL); - if (libbpf_get_error(obj)) { - fprintf(stderr, "ERROR: opening BPF object file failed\n"); - return 0; - } - - prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); - if (!prog) { - fprintf(stderr, "ERROR: finding a prog in obj file failed\n"); - goto cleanup; - } - - /* load BPF program */ - if (bpf_object__load(obj)) { - fprintf(stderr, "ERROR: loading BPF object file failed\n"); - goto cleanup; - } - - link = bpf_program__attach(prog); - if (libbpf_get_error(link)) { - fprintf(stderr, "ERROR: bpf_program__attach failed\n"); - link = NULL; - goto cleanup; - } - - snprintf(command, 256, "mount %s tmpmnt/", argv[1]); - f = popen(command, "r"); - ret = pclose(f); - -cleanup: - bpf_link__destroy(link); - bpf_object__close(obj); - return ret ? 0 : 1; -} diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c index d5631014a176..af29a1bde4e4 100644 --- a/samples/bpf/xdp2skb_meta_kern.c +++ b/samples/bpf/xdp2skb_meta_kern.c @@ -32,7 +32,7 @@ SEC("xdp_mark") int _xdp_mark(struct xdp_md *ctx) { struct meta_info *meta; - void *data, *data_end; + void *data; int ret; /* Reserve space in-front of data pointer for our meta info. 
diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c index ffdd548627f0..da67bcad1c63 100644 --- a/samples/bpf/xdp_adjust_tail_kern.c +++ b/samples/bpf/xdp_adjust_tail_kern.c @@ -57,6 +57,7 @@ static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth) static __always_inline __u16 csum_fold_helper(__u32 csum) { + csum = (csum & 0xffff) + (csum >> 16); return ~((csum & 0xffff) + (csum >> 16)); } diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf index b75f09f3f424..c3cbeb13de50 100644 --- a/scripts/Makefile.btf +++ b/scripts/Makefile.btf @@ -3,6 +3,8 @@ pahole-ver := $(CONFIG_PAHOLE_VERSION) pahole-flags-y := +JOBS := $(patsubst -j%,%,$(filter -j%,$(MAKEFLAGS))) + ifeq ($(call test-le, $(pahole-ver), 125),y) # pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars @@ -12,14 +14,14 @@ endif pahole-flags-$(call test-ge, $(pahole-ver), 121) += --btf_gen_floats -pahole-flags-$(call test-ge, $(pahole-ver), 122) += -j +pahole-flags-$(call test-ge, $(pahole-ver), 122) += -j$(JOBS) pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized else # Switch to using --btf_features for v1.26 and later. -pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs +pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j$(JOBS) --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs ifneq ($(KBUILD_EXTMOD),) module-pahole-flags-$(call test-ge, $(pahole-ver), 126) += --btf_features=distilled_base diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index c55878bddfdd..e74a01a85070 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -37,10 +37,11 @@ class APIElement(object): @desc: textual description of the symbol @ret: (optional) description of any associated return value """ - def __init__(self, proto='', desc='', ret=''): + def __init__(self, proto='', desc='', ret='', attrs=[]): self.proto = proto self.desc = desc self.ret = ret + self.attrs = attrs class Helper(APIElement): @@ -81,6 +82,11 @@ class Helper(APIElement): return res +ATTRS = { + '__bpf_fastcall': 'bpf_fastcall' +} + + class HeaderParser(object): """ An object used to parse a file in order to extract the documentation of a @@ -111,7 +117,8 @@ class HeaderParser(object): proto = self.parse_proto() desc = self.parse_desc(proto) ret = self.parse_ret(proto) - return Helper(proto=proto, desc=desc, ret=ret) + attrs = self.parse_attrs(proto) + return Helper(proto=proto, desc=desc, ret=ret, attrs=attrs) def parse_symbol(self): p = re.compile(r' \* ?(BPF\w+)$') @@ -192,6 +199,28 @@ class HeaderParser(object): raise Exception("No return found for " + proto) return ret + def parse_attrs(self, proto): + p = re.compile(r' \* ?(?:\t| {5,8})Attributes$') + capture = p.match(self.line) + if not capture: + return [] + # Expect a single line with mnemonics for attributes separated by spaces + self.line = self.reader.readline() + p = re.compile(r' \* ?(?:\t| {5,8})(?:\t| {8})(.*)') + capture = p.match(self.line) + if not capture: + raise Exception("Incomplete 'Attributes' section for " + proto) + attrs = capture.group(1).split(' ') + for attr in attrs: + if attr not in ATTRS: + raise Exception("Unexpected attribute '" + attr + "' specified for " + proto) + self.line = self.reader.readline() + if self.line != ' *\n': + raise Exception("Expecting empty line after 'Attributes' section for " + 
proto) + # Prepare a line for next self.parse_* to consume + self.line = self.reader.readline() + return attrs + def seek_to(self, target, help_message, discard_lines = 1): self.reader.seek(0) offset = self.reader.read().find(target) @@ -789,6 +818,21 @@ class PrinterHelpers(Printer): print('%s;' % fwd) print('') + used_attrs = set() + for helper in self.elements: + for attr in helper.attrs: + used_attrs.add(attr) + for attr in sorted(used_attrs): + print('#ifndef %s' % attr) + print('#if __has_attribute(%s)' % ATTRS[attr]) + print('#define %s __attribute__((%s))' % (attr, ATTRS[attr])) + print('#else') + print('#define %s' % attr) + print('#endif') + print('#endif') + if used_attrs: + print('') + def print_footer(self): footer = '' print(footer) @@ -827,7 +871,10 @@ class PrinterHelpers(Printer): print(' *{}{}'.format(' \t' if line else '', line)) print(' */') - print('static %s %s(* const %s)(' % (self.map_type(proto['ret_type']), + print('static ', end='') + if helper.attrs: + print('%s ' % (" ".join(helper.attrs)), end='') + print('%s %s(* const %s)(' % (self.map_type(proto['ret_type']), proto['ret_star'], proto['name']), end='') comma = '' for i, a in enumerate(proto['args']): diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c index a90a5d110f92..1baee9e2aba9 100644 --- a/tools/bpf/bpf_jit_disasm.c +++ b/tools/bpf/bpf_jit_disasm.c @@ -210,7 +210,7 @@ static uint8_t *get_last_jit_image(char *haystack, size_t hlen, return NULL; } if (proglen > 1000000) { - printf("proglen of %d too big, stopping\n", proglen); + printf("proglen of %u too big, stopping\n", proglen); return NULL; } diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index ba927379eb20..a4263dfb5e03 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -147,7 +147,11 @@ ifeq ($(feature-llvm),1) # If LLVM is available, use it for JIT disassembly CFLAGS += -DHAVE_LLVM_SUPPORT LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets - CFLAGS += $(shell $(LLVM_CONFIG) --cflags) + # llvm-config always adds -D_GNU_SOURCE, however, it may already be in CFLAGS + # (e.g. when bpftool build is called from selftests build as selftests + # Makefile includes lib.mk which sets -D_GNU_SOURCE) which would cause + # compilation error due to redefinition. Let's filter it out here. 
+ CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags)) LIBS += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS)) ifeq ($(shell $(LLVM_CONFIG) --shared-mode),static) LIBS += $(shell $(LLVM_CONFIG) --system-libs $(LLVM_CONFIG_LIB_COMPONENTS)) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 7d2af1ff3c8d..d005e4fd6128 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -1,11 +1,15 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2019 Facebook */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include #include #include #include #include +#include #include #include #include @@ -21,6 +25,7 @@ #include "main.h" #define KFUNC_DECL_TAG "bpf_kfunc" +#define FASTCALL_DECL_TAG "bpf_fastcall" static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_UNKN] = "UNKNOWN", @@ -284,7 +289,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id, } else { if (btf_kflag(t)) printf("\n\t'%s' val=%lldLL", name, - (unsigned long long)val); + (long long)val); else printf("\n\t'%s' val=%lluULL", name, (unsigned long long)val); @@ -464,19 +469,59 @@ static int dump_btf_raw(const struct btf *btf, return 0; } +struct ptr_array { + __u32 cnt; + __u32 cap; + const void **elems; +}; + +static int ptr_array_push(const void *ptr, struct ptr_array *arr) +{ + __u32 new_cap; + void *tmp; + + if (arr->cnt == arr->cap) { + new_cap = (arr->cap ?: 16) * 2; + tmp = realloc(arr->elems, sizeof(*arr->elems) * new_cap); + if (!tmp) + return -ENOMEM; + arr->elems = tmp; + arr->cap = new_cap; + } + arr->elems[arr->cnt++] = ptr; + return 0; +} + +static void ptr_array_free(struct ptr_array *arr) +{ + free(arr->elems); +} + +static int cmp_kfuncs(const void *pa, const void *pb, void *ctx) +{ + struct btf *btf = ctx; + const struct btf_type *a = *(void **)pa; + const struct btf_type *b = *(void **)pb; + + return strcmp(btf__str_by_offset(btf, a->name_off), + btf__str_by_offset(btf, b->name_off)); +} + static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf) { LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts); - int cnt = btf__type_cnt(btf); - int i; + __u32 cnt = btf__type_cnt(btf), i, j; + struct ptr_array fastcalls = {}; + struct ptr_array kfuncs = {}; + int err = 0; printf("\n/* BPF kfuncs */\n"); printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n"); for (i = 1; i < cnt; i++) { const struct btf_type *t = btf__type_by_id(btf, i); + const struct btf_type *ft; const char *name; - int err; if (!btf_is_decl_tag(t)) continue; @@ -484,27 +529,53 @@ static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf) if (btf_decl_tag(t)->component_idx != -1) continue; - name = btf__name_by_offset(btf, t->name_off); - if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG))) + ft = btf__type_by_id(btf, t->type); + if (!btf_is_func(ft)) continue; - t = btf__type_by_id(btf, t->type); - if (!btf_is_func(t)) - continue; + name = btf__name_by_offset(btf, t->name_off); + if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG)) == 0) { + err = ptr_array_push(ft, &kfuncs); + if (err) + goto out; + } + + if (strncmp(name, FASTCALL_DECL_TAG, sizeof(FASTCALL_DECL_TAG)) == 0) { + err = ptr_array_push(ft, &fastcalls); + if (err) + goto out; + } + } + + /* Sort kfuncs by name for improved vmlinux.h stability */ + qsort_r(kfuncs.elems, kfuncs.cnt, sizeof(*kfuncs.elems), cmp_kfuncs, (void *)btf); + for (i = 0; i < kfuncs.cnt; i++) { + const struct btf_type *t = kfuncs.elems[i]; printf("extern "); + /* Assume small amount of fastcall kfuncs */ + 
for (j = 0; j < fastcalls.cnt; j++) { + if (fastcalls.elems[j] == t) { + printf("__bpf_fastcall "); + break; + } + } + opts.field_name = btf__name_by_offset(btf, t->name_off); err = btf_dump__emit_type_decl(d, t->type, &opts); if (err) - return err; + goto out; printf(" __weak __ksym;\n"); } printf("#endif\n\n"); - return 0; +out: + ptr_array_free(&fastcalls); + ptr_array_free(&kfuncs); + return err; } static void __printf(2, 0) btf_dump_printf(void *ctx, @@ -718,6 +789,13 @@ static int dump_btf_c(const struct btf *btf, printf("#ifndef __weak\n"); printf("#define __weak __attribute__((weak))\n"); printf("#endif\n\n"); + printf("#ifndef __bpf_fastcall\n"); + printf("#if __has_attribute(bpf_fastcall)\n"); + printf("#define __bpf_fastcall __attribute__((bpf_fastcall))\n"); + printf("#else\n"); + printf("#define __bpf_fastcall\n"); + printf("#endif\n"); + printf("#endif\n\n"); if (root_type_cnt) { for (i = 0; i < root_type_cnt; i++) { diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c index 7b8d9ec89ebd..c032d2c6ab6d 100644 --- a/tools/bpf/bpftool/jit_disasm.c +++ b/tools/bpf/bpftool/jit_disasm.c @@ -80,7 +80,8 @@ symbol_lookup_callback(__maybe_unused void *disasm_info, static int init_context(disasm_ctx_t *ctx, const char *arch, __maybe_unused const char *disassembler_options, - __maybe_unused unsigned char *image, __maybe_unused ssize_t len) + __maybe_unused unsigned char *image, __maybe_unused ssize_t len, + __maybe_unused __u64 func_ksym) { char *triple; @@ -109,12 +110,13 @@ static void destroy_context(disasm_ctx_t *ctx) } static int -disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc) +disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc, + __u64 func_ksym) { char buf[256]; int count; - count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc, + count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, func_ksym + pc, buf, sizeof(buf)); if (json_output) printf_json(buf); @@ -136,8 +138,21 @@ int disasm_init(void) #ifdef HAVE_LIBBFD_SUPPORT #define DISASM_SPACER "\t" +struct disasm_info { + struct disassemble_info info; + __u64 func_ksym; +}; + +static void disasm_print_addr(bfd_vma addr, struct disassemble_info *info) +{ + struct disasm_info *dinfo = container_of(info, struct disasm_info, info); + + addr += dinfo->func_ksym; + generic_print_address(addr, info); +} + typedef struct { - struct disassemble_info *info; + struct disasm_info *info; disassembler_ftype disassemble; bfd *bfdf; } disasm_ctx_t; @@ -215,7 +230,7 @@ static int fprintf_json_styled(void *out, static int init_context(disasm_ctx_t *ctx, const char *arch, const char *disassembler_options, - unsigned char *image, ssize_t len) + unsigned char *image, ssize_t len, __u64 func_ksym) { struct disassemble_info *info; char tpath[PATH_MAX]; @@ -238,12 +253,13 @@ static int init_context(disasm_ctx_t *ctx, const char *arch, } bfdf = ctx->bfdf; - ctx->info = malloc(sizeof(struct disassemble_info)); + ctx->info = malloc(sizeof(struct disasm_info)); if (!ctx->info) { p_err("mem alloc failed"); goto err_close; } - info = ctx->info; + ctx->info->func_ksym = func_ksym; + info = &ctx->info->info; if (json_output) init_disassemble_info_compat(info, stdout, @@ -272,6 +288,7 @@ static int init_context(disasm_ctx_t *ctx, const char *arch, info->disassembler_options = disassembler_options; info->buffer = image; info->buffer_length = len; + info->print_address_func = disasm_print_addr; disassemble_init_for_target(info); @@ -304,9 +321,10 @@ static void 
destroy_context(disasm_ctx_t *ctx) static int disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image, - __maybe_unused ssize_t len, int pc) + __maybe_unused ssize_t len, int pc, + __maybe_unused __u64 func_ksym) { - return ctx->disassemble(pc, ctx->info); + return ctx->disassemble(pc, &ctx->info->info); } int disasm_init(void) @@ -331,7 +349,7 @@ int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, if (!len) return -1; - if (init_context(&ctx, arch, disassembler_options, image, len)) + if (init_context(&ctx, arch, disassembler_options, image, len, func_ksym)) return -1; if (json_output) @@ -360,7 +378,7 @@ int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, printf("%4x:" DISASM_SPACER, pc); } - count = disassemble_insn(&ctx, image, len, pc); + count = disassemble_insn(&ctx, image, len, pc, func_ksym); if (json_output) { /* Operand array, was started in fprintf_json. Before diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index d54aaa0619df..bd9f960bce3d 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -679,8 +679,8 @@ static int sets_patch(struct object *obj) next = rb_first(&obj->sets); while (next) { - struct btf_id_set8 *set8; - struct btf_id_set *set; + struct btf_id_set8 *set8 = NULL; + struct btf_id_set *set = NULL; unsigned long addr, off; struct btf_id *id; diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c index 9a5c1f008fe6..fced54a3adf6 100644 --- a/tools/bpf/runqslower/runqslower.bpf.c +++ b/tools/bpf/runqslower/runqslower.bpf.c @@ -70,7 +70,6 @@ int handle__sched_switch(u64 *ctx) struct task_struct *next = (struct task_struct *)ctx[2]; struct runq_event event = {}; u64 *tsp, delta_us; - long state; u32 pid; /* ivcsw: treat like an enqueue event and store timestamp */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4a939c90dc2e..4162afc6b5d0 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1116,6 +1116,7 @@ enum bpf_attach_type { BPF_NETKIT_PRIMARY, BPF_NETKIT_PEER, BPF_TRACE_KPROBE_SESSION, + BPF_TRACE_UPROBE_SESSION, __MAX_BPF_ATTACH_TYPE }; @@ -1973,6 +1974,8 @@ union bpf_attr { * program. * Return * The SMP id of the processor running the program. + * Attributes + * __bpf_fastcall * * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description @@ -3104,10 +3107,6 @@ union bpf_attr { * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. - * - * Also, the helper is only available for the architectures having - * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, - * x86 architecture is the only one to support this feature. * Return * 0 * @@ -5372,7 +5371,7 @@ union bpf_attr { * Currently, the **flags** must be 0. Currently, nr_loops is * limited to 1 << 23 (~8 million) loops. * - * long (\*callback_fn)(u32 index, void \*ctx); + * long (\*callback_fn)(u64 index, void \*ctx); * * where **index** is the current index in the loop. The index * is zero-indexed. diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 1b22f0f37288..857a5f7b413d 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -61,7 +61,8 @@ ifndef VERBOSE endif INCLUDES = -I$(or $(OUTPUT),.) 
\ - -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi + -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \ + -I$(srctree)/tools/arch/$(SRCARCH)/include export prefix libdir src obj diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 2a4c71501a17..becdfa701c75 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -776,6 +776,7 @@ int bpf_link_create(int prog_fd, int target_fd, return libbpf_err(-EINVAL); break; case BPF_TRACE_UPROBE_MULTI: + case BPF_TRACE_UPROBE_SESSION: attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index fdf44403ff36..6ff963a491d9 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -34,6 +34,7 @@ struct bpf_gen { void *data_cur; void *insn_start; void *insn_cur; + bool swapped_endian; ssize_t cleanup_label; __u32 nr_progs; __u32 nr_maps; diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 80bc0242e8dc..686824b8b413 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -185,6 +185,7 @@ enum libbpf_tristate { #define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted"))) #define __kptr __attribute__((btf_type_tag("kptr"))) #define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr"))) +#define __uptr __attribute__((btf_type_tag("uptr"))) #if defined (__clang__) #define bpf_ksym_exists(sym) ({ \ diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 3c131039c523..12468ae0d573 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -22,6 +22,7 @@ #include "libbpf_internal.h" #include "hashmap.h" #include "strset.h" +#include "str_error.h" #define BTF_MAX_NR_TYPES 0x7fffffffU #define BTF_MAX_STR_OFFSET 0x7fffffffU @@ -1179,7 +1180,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, fd = open(path, O_RDONLY | O_CLOEXEC); if (fd < 0) { err = -errno; - pr_warn("failed to open %s: %s\n", path, strerror(errno)); + pr_warn("failed to open %s: %s\n", path, errstr(err)); return ERR_PTR(err); } @@ -1445,7 +1446,7 @@ int btf_load_into_kernel(struct btf *btf, goto retry_load; err = -errno; - pr_warn("BTF loading error: %d\n", err); + pr_warn("BTF loading error: %s\n", errstr(err)); /* don't print out contents of custom log_buf */ if (!log_buf && buf[0]) pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf); @@ -2885,7 +2886,7 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id, return btf_commit_type(btf, sz); } -struct btf_ext_sec_setup_param { +struct btf_ext_sec_info_param { __u32 off; __u32 len; __u32 min_rec_size; @@ -2893,14 +2894,20 @@ struct btf_ext_sec_setup_param { const char *desc; }; -static int btf_ext_setup_info(struct btf_ext *btf_ext, - struct btf_ext_sec_setup_param *ext_sec) +/* + * Parse a single info subsection of the BTF.ext info data: + * - validate subsection structure and elements + * - save info subsection start and sizing details in struct btf_ext + * - endian-independent operation, for calling before byte-swapping + */ +static int btf_ext_parse_sec_info(struct btf_ext *btf_ext, + struct btf_ext_sec_info_param *ext_sec, + bool is_native) { const struct btf_ext_info_sec *sinfo; struct btf_ext_info *ext_info; __u32 info_left, record_size; size_t sec_cnt = 0; - /* The start of the info 
sec (including the __u32 record_size). */ void *info; if (ext_sec->len == 0) @@ -2912,6 +2919,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext, return -EINVAL; } + /* The start of the info sec (including the __u32 record_size). */ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off; info_left = ext_sec->len; @@ -2927,9 +2935,13 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext, return -EINVAL; } - /* The record size needs to meet the minimum standard */ - record_size = *(__u32 *)info; + /* The record size needs to meet either the minimum standard or, when + * handling non-native endianness data, the exact standard so as + * to allow safe byte-swapping. + */ + record_size = is_native ? *(__u32 *)info : bswap_32(*(__u32 *)info); if (record_size < ext_sec->min_rec_size || + (!is_native && record_size != ext_sec->min_rec_size) || record_size & 0x03) { pr_debug("%s section in .BTF.ext has invalid record size %u\n", ext_sec->desc, record_size); @@ -2941,7 +2953,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext, /* If no records, return failure now so .BTF.ext won't be used. */ if (!info_left) { - pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); + pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc); return -EINVAL; } @@ -2956,7 +2968,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext, return -EINVAL; } - num_records = sinfo->num_info; + num_records = is_native ? sinfo->num_info : bswap_32(sinfo->num_info); if (num_records == 0) { pr_debug("%s section has incorrect num_records in .BTF.ext\n", ext_sec->desc); @@ -2984,64 +2996,157 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext, return 0; } -static int btf_ext_setup_func_info(struct btf_ext *btf_ext) +/* Parse all info secs in the BTF.ext info data */ +static int btf_ext_parse_info(struct btf_ext *btf_ext, bool is_native) { - struct btf_ext_sec_setup_param param = { + struct btf_ext_sec_info_param func_info = { .off = btf_ext->hdr->func_info_off, .len = btf_ext->hdr->func_info_len, .min_rec_size = sizeof(struct bpf_func_info_min), .ext_info = &btf_ext->func_info, .desc = "func_info" }; - - return btf_ext_setup_info(btf_ext, ¶m); -} - -static int btf_ext_setup_line_info(struct btf_ext *btf_ext) -{ - struct btf_ext_sec_setup_param param = { + struct btf_ext_sec_info_param line_info = { .off = btf_ext->hdr->line_info_off, .len = btf_ext->hdr->line_info_len, .min_rec_size = sizeof(struct bpf_line_info_min), .ext_info = &btf_ext->line_info, .desc = "line_info", }; - - return btf_ext_setup_info(btf_ext, ¶m); -} - -static int btf_ext_setup_core_relos(struct btf_ext *btf_ext) -{ - struct btf_ext_sec_setup_param param = { + struct btf_ext_sec_info_param core_relo = { .off = btf_ext->hdr->core_relo_off, .len = btf_ext->hdr->core_relo_len, .min_rec_size = sizeof(struct bpf_core_relo), .ext_info = &btf_ext->core_relo_info, .desc = "core_relo", }; + int err; - return btf_ext_setup_info(btf_ext, ¶m); + err = btf_ext_parse_sec_info(btf_ext, &func_info, is_native); + if (err) + return err; + + err = btf_ext_parse_sec_info(btf_ext, &line_info, is_native); + if (err) + return err; + + if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) + return 0; /* skip core relos parsing */ + + err = btf_ext_parse_sec_info(btf_ext, &core_relo, is_native); + if (err) + return err; + + return 0; } -static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) +/* Swap byte-order of BTF.ext header with any endianness */ +static void btf_ext_bswap_hdr(struct btf_ext_header *h) { 
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data; + bool is_native = h->magic == BTF_MAGIC; + __u32 hdr_len; - if (data_size < offsetofend(struct btf_ext_header, hdr_len) || - data_size < hdr->hdr_len) { - pr_debug("BTF.ext header not found"); + hdr_len = is_native ? h->hdr_len : bswap_32(h->hdr_len); + + h->magic = bswap_16(h->magic); + h->hdr_len = bswap_32(h->hdr_len); + h->func_info_off = bswap_32(h->func_info_off); + h->func_info_len = bswap_32(h->func_info_len); + h->line_info_off = bswap_32(h->line_info_off); + h->line_info_len = bswap_32(h->line_info_len); + + if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) + return; + + h->core_relo_off = bswap_32(h->core_relo_off); + h->core_relo_len = bswap_32(h->core_relo_len); +} + +/* Swap byte-order of generic info subsection */ +static void btf_ext_bswap_info_sec(void *info, __u32 len, bool is_native, + info_rec_bswap_fn bswap_fn) +{ + struct btf_ext_info_sec *sec; + __u32 info_left, rec_size, *rs; + + if (len == 0) + return; + + rs = info; /* info record size */ + rec_size = is_native ? *rs : bswap_32(*rs); + *rs = bswap_32(*rs); + + sec = info + sizeof(__u32); /* info sec #1 */ + info_left = len - sizeof(__u32); + while (info_left) { + unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec); + __u32 i, num_recs; + void *p; + + num_recs = is_native ? sec->num_info : bswap_32(sec->num_info); + sec->sec_name_off = bswap_32(sec->sec_name_off); + sec->num_info = bswap_32(sec->num_info); + p = sec->data; /* info rec #1 */ + for (i = 0; i < num_recs; i++, p += rec_size) + bswap_fn(p); + sec = p; + info_left -= sec_hdrlen + (__u64)rec_size * num_recs; + } +} + +/* + * Swap byte-order of all info data in a BTF.ext section + * - requires BTF.ext hdr in native endianness + */ +static void btf_ext_bswap_info(struct btf_ext *btf_ext, void *data) +{ + const bool is_native = btf_ext->swapped_endian; + const struct btf_ext_header *h = data; + void *info; + + /* Swap func_info subsection byte-order */ + info = data + h->hdr_len + h->func_info_off; + btf_ext_bswap_info_sec(info, h->func_info_len, is_native, + (info_rec_bswap_fn)bpf_func_info_bswap); + + /* Swap line_info subsection byte-order */ + info = data + h->hdr_len + h->line_info_off; + btf_ext_bswap_info_sec(info, h->line_info_len, is_native, + (info_rec_bswap_fn)bpf_line_info_bswap); + + /* Swap core_relo subsection byte-order (if present) */ + if (h->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) + return; + + info = data + h->hdr_len + h->core_relo_off; + btf_ext_bswap_info_sec(info, h->core_relo_len, is_native, + (info_rec_bswap_fn)bpf_core_relo_bswap); +} + +/* Parse hdr data and info sections: check and convert to native endianness */ +static int btf_ext_parse(struct btf_ext *btf_ext) +{ + __u32 hdr_len, data_size = btf_ext->data_size; + struct btf_ext_header *hdr = btf_ext->hdr; + bool swapped_endian = false; + int err; + + if (data_size < offsetofend(struct btf_ext_header, hdr_len)) { + pr_debug("BTF.ext header too short\n"); return -EINVAL; } + hdr_len = hdr->hdr_len; if (hdr->magic == bswap_16(BTF_MAGIC)) { - pr_warn("BTF.ext in non-native endianness is not supported\n"); - return -ENOTSUP; + swapped_endian = true; + hdr_len = bswap_32(hdr_len); } else if (hdr->magic != BTF_MAGIC) { pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); return -EINVAL; } - if (hdr->version != BTF_VERSION) { + /* Ensure known version of structs, current BTF_VERSION == 1 */ + if (hdr->version != 1) { pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); 
return -ENOTSUP; } @@ -3051,11 +3156,39 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) return -ENOTSUP; } - if (data_size == hdr->hdr_len) { + if (data_size < hdr_len) { + pr_debug("BTF.ext header not found\n"); + return -EINVAL; + } else if (data_size == hdr_len) { pr_debug("BTF.ext has no data\n"); return -EINVAL; } + /* Verify mandatory hdr info details present */ + if (hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { + pr_warn("BTF.ext header missing func_info, line_info\n"); + return -EINVAL; + } + + /* Keep hdr native byte-order in memory for introspection */ + if (swapped_endian) + btf_ext_bswap_hdr(btf_ext->hdr); + + /* Validate info subsections and cache key metadata */ + err = btf_ext_parse_info(btf_ext, !swapped_endian); + if (err) + return err; + + /* Keep infos native byte-order in memory for introspection */ + if (swapped_endian) + btf_ext_bswap_info(btf_ext, btf_ext->data); + + /* + * Set btf_ext->swapped_endian only after all header and info data has + * been swapped, helping bswap functions determine if their data are + * in native byte-order when called. + */ + btf_ext->swapped_endian = swapped_endian; return 0; } @@ -3067,6 +3200,7 @@ void btf_ext__free(struct btf_ext *btf_ext) free(btf_ext->line_info.sec_idxs); free(btf_ext->core_relo_info.sec_idxs); free(btf_ext->data); + free(btf_ext->data_swapped); free(btf_ext); } @@ -3087,29 +3221,7 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) } memcpy(btf_ext->data, data, size); - err = btf_ext_parse_hdr(btf_ext->data, size); - if (err) - goto done; - - if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) { - err = -EINVAL; - goto done; - } - - err = btf_ext_setup_func_info(btf_ext); - if (err) - goto done; - - err = btf_ext_setup_line_info(btf_ext); - if (err) - goto done; - - if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) - goto done; /* skip core relos parsing */ - - err = btf_ext_setup_core_relos(btf_ext); - if (err) - goto done; + err = btf_ext_parse(btf_ext); done: if (err) { @@ -3120,15 +3232,66 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size) return btf_ext; } +static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, bool swap_endian) +{ + struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro; + const __u32 data_sz = btf_ext->data_size; + void *data; + + /* Return native data (always present) or swapped data if present */ + if (!swap_endian) + return btf_ext->data; + else if (btf_ext->data_swapped) + return btf_ext->data_swapped; + + /* Recreate missing swapped data, then cache and return */ + data = calloc(1, data_sz); + if (!data) + return NULL; + memcpy(data, btf_ext->data, data_sz); + + btf_ext_bswap_info(btf_ext, data); + btf_ext_bswap_hdr(data); + btf_ext->data_swapped = data; + return data; +} + const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size) { + void *data; + + data = btf_ext_raw_data(btf_ext, btf_ext->swapped_endian); + if (!data) + return errno = ENOMEM, NULL; + *size = btf_ext->data_size; - return btf_ext->data; + return data; } __attribute__((alias("btf_ext__raw_data"))) const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); +enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext) +{ + if (is_host_big_endian()) + return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN; + else + return btf_ext->swapped_endian ? 
BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; +} + +int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian) +{ + if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN) + return libbpf_err(-EINVAL); + + btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN); + + if (!btf_ext->swapped_endian) { + free(btf_ext->data_swapped); + btf_ext->data_swapped = NULL; + } + return 0; +} struct btf_dedup; @@ -3291,7 +3454,7 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts) d = btf_dedup_new(btf, opts); if (IS_ERR(d)) { - pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); + pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d)); return libbpf_err(-EINVAL); } @@ -3302,42 +3465,42 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts) err = btf_dedup_prep(d); if (err) { - pr_debug("btf_dedup_prep failed:%d\n", err); + pr_debug("btf_dedup_prep failed: %s\n", errstr(err)); goto done; } err = btf_dedup_strings(d); if (err < 0) { - pr_debug("btf_dedup_strings failed:%d\n", err); + pr_debug("btf_dedup_strings failed: %s\n", errstr(err)); goto done; } err = btf_dedup_prim_types(d); if (err < 0) { - pr_debug("btf_dedup_prim_types failed:%d\n", err); + pr_debug("btf_dedup_prim_types failed: %s\n", errstr(err)); goto done; } err = btf_dedup_struct_types(d); if (err < 0) { - pr_debug("btf_dedup_struct_types failed:%d\n", err); + pr_debug("btf_dedup_struct_types failed: %s\n", errstr(err)); goto done; } err = btf_dedup_resolve_fwds(d); if (err < 0) { - pr_debug("btf_dedup_resolve_fwds failed:%d\n", err); + pr_debug("btf_dedup_resolve_fwds failed: %s\n", errstr(err)); goto done; } err = btf_dedup_ref_types(d); if (err < 0) { - pr_debug("btf_dedup_ref_types failed:%d\n", err); + pr_debug("btf_dedup_ref_types failed: %s\n", errstr(err)); goto done; } err = btf_dedup_compact_types(d); if (err < 0) { - pr_debug("btf_dedup_compact_types failed:%d\n", err); + pr_debug("btf_dedup_compact_types failed: %s\n", errstr(err)); goto done; } err = btf_dedup_remap_types(d); if (err < 0) { - pr_debug("btf_dedup_remap_types failed:%d\n", err); + pr_debug("btf_dedup_remap_types failed: %s\n", errstr(err)); goto done; } @@ -3385,7 +3548,7 @@ struct btf_dedup { struct strset *strs_set; }; -static long hash_combine(long h, long value) +static unsigned long hash_combine(unsigned long h, unsigned long value) { return h * 31 + value; } @@ -5056,7 +5219,8 @@ struct btf *btf__load_vmlinux_btf(void) btf = btf__parse(sysfs_btf_path, NULL); if (!btf) { err = -errno; - pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err); + pr_warn("failed to read kernel BTF from '%s': %s\n", + sysfs_btf_path, errstr(err)); return libbpf_err_ptr(err); } pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path); @@ -5073,7 +5237,7 @@ struct btf *btf__load_vmlinux_btf(void) btf = btf__parse(path, NULL); err = libbpf_get_error(btf); - pr_debug("loading kernel BTF '%s': %d\n", path, err); + pr_debug("loading kernel BTF '%s': %s\n", path, errstr(err)); if (err) continue; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 4e349ad79ee6..47ee8f6ac489 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -167,6 +167,9 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size); +LIBBPF_API enum btf_endianness 
btf_ext__endianness(const struct btf_ext *btf_ext); +LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext, + enum btf_endianness endian); LIBBPF_API int btf__find_str(struct btf *btf, const char *s); LIBBPF_API int btf__add_str(struct btf *btf, const char *s); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 0a7327541c17..a3fc6908f6c9 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -21,6 +21,7 @@ #include "hashmap.h" #include "libbpf.h" #include "libbpf_internal.h" +#include "str_error.h" static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t"; static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1; @@ -867,8 +868,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d, } pads[] = { {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8} }; - int new_off, pad_bits, bits, i; - const char *pad_type; + int new_off = 0, pad_bits = 0, bits, i; + const char *pad_type = NULL; if (cur_off >= next_off) return; /* no gap */ @@ -1304,7 +1305,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, * chain, restore stack, emit warning, and try to * proceed nevertheless */ - pr_warn("not enough memory for decl stack:%d", err); + pr_warn("not enough memory for decl stack: %s\n", errstr(err)); d->decl_stack_cnt = stack_start; return; } diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index 4f7399d85eab..b72f83e15156 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -428,7 +428,7 @@ static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i) } else { off = r->str_map[*str_off]; if (!off) { - pr_warn("string '%s' [offset %u] is not mapped to base BTF", + pr_warn("string '%s' [offset %u] is not mapped to base BTF\n", btf__str_by_offset(r->btf, off), *str_off); return -ENOENT; } diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index b5ab1cb13e5e..823f83ad819c 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -24,7 +24,6 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd) { - char errmsg[STRERR_BUFSIZE]; int fd, ret; Elf *elf; @@ -38,8 +37,7 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd) fd = open(binary_path, O_RDONLY | O_CLOEXEC); if (fd < 0) { ret = -errno; - pr_warn("elf: failed to open %s: %s\n", binary_path, - libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + pr_warn("elf: failed to open %s: %s\n", binary_path, errstr(ret)); return ret; } elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c index 50befe125ddc..760657f5224c 100644 --- a/tools/lib/bpf/features.c +++ b/tools/lib/bpf/features.c @@ -47,7 +47,6 @@ static int probe_kern_prog_name(int token_fd) static int probe_kern_global_data(int token_fd) { - char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), @@ -67,9 +66,8 @@ static int probe_kern_global_data(int token_fd) map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts); if (map < 0) { ret = -errno; - cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); - pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", - __func__, cp, -ret); + pr_warn("Error in %s(): %s. 
Couldn't create simple array map.\n", + __func__, errstr(ret)); return ret; } @@ -267,7 +265,6 @@ static int probe_kern_probe_read_kernel(int token_fd) static int probe_prog_bind_map(int token_fd) { - char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -285,9 +282,8 @@ static int probe_prog_bind_map(int token_fd) map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts); if (map < 0) { ret = -errno; - cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); - pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", - __func__, cp, -ret); + pr_warn("Error in %s(): %s. Couldn't create simple array map.\n", + __func__, errstr(ret)); return ret; } @@ -604,7 +600,8 @@ bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_ } else if (ret == 0) { WRITE_ONCE(cache->res[feat_id], FEAT_MISSING); } else { - pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); + pr_warn("Detection of kernel %s support failed: %s\n", + feat->desc, errstr(ret)); WRITE_ONCE(cache->res[feat_id], FEAT_MISSING); } } diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index cf3323fd47b8..113ae4abd345 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -14,6 +14,7 @@ #include "bpf_gen_internal.h" #include "skel_internal.h" #include +#include "str_error.h" #define MAX_USED_MAPS 64 #define MAX_USED_PROGS 32 @@ -393,7 +394,7 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps) blob_fd_array_off(gen, i)); emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0)); emit(gen, BPF_EXIT_INSN()); - pr_debug("gen: finish %d\n", gen->error); + pr_debug("gen: finish %s\n", errstr(gen->error)); if (!gen->error) { struct gen_loader_opts *opts = gen->opts; @@ -401,6 +402,15 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps) opts->insns_sz = gen->insn_cur - gen->insn_start; opts->data = gen->data_start; opts->data_sz = gen->data_cur - gen->data_start; + + /* use target endianness for embedded loader */ + if (gen->swapped_endian) { + struct bpf_insn *insn = (struct bpf_insn *)opts->insns; + int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn); + + for (i = 0; i < insn_cnt; i++) + bpf_insn_bswap(insn++); + } } return gen->error; } @@ -414,6 +424,28 @@ void bpf_gen__free(struct bpf_gen *gen) free(gen); } +/* + * Fields of bpf_attr are set to values in native byte-order before being + * written to the target-bound data blob, and may need endian conversion. + * This macro allows providing the correct value in situ more simply than + * writing a separate converter for *all fields* of *all records* included + * in union bpf_attr. Note that sizeof(rval) should match the assignment + * target to avoid runtime problems. 
+ */ +#define tgt_endian(rval) ({ \ + typeof(rval) _val = (rval); \ + if (gen->swapped_endian) { \ + switch (sizeof(_val)) { \ + case 1: break; \ + case 2: _val = bswap_16(_val); break; \ + case 4: _val = bswap_32(_val); break; \ + case 8: _val = bswap_64(_val); break; \ + default: pr_warn("unsupported bswap size!\n"); \ + } \ + } \ + _val; \ +}) + void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, __u32 btf_raw_size) { @@ -422,11 +454,12 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data, union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: load_btf: size %d\n", btf_raw_size); btf_data = add_data(gen, btf_raw_data, btf_raw_size); - attr.btf_size = btf_raw_size; + attr.btf_size = tgt_endian(btf_raw_size); btf_load_attr = add_data(gen, &attr, attr_size); + pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n", + btf_data, btf_raw_size, btf_load_attr, attr_size); /* populate union bpf_attr with user provided log details */ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4, @@ -457,28 +490,29 @@ void bpf_gen__map_create(struct bpf_gen *gen, union bpf_attr attr; memset(&attr, 0, attr_size); - attr.map_type = map_type; - attr.key_size = key_size; - attr.value_size = value_size; - attr.map_flags = map_attr->map_flags; - attr.map_extra = map_attr->map_extra; + attr.map_type = tgt_endian(map_type); + attr.key_size = tgt_endian(key_size); + attr.value_size = tgt_endian(value_size); + attr.map_flags = tgt_endian(map_attr->map_flags); + attr.map_extra = tgt_endian(map_attr->map_extra); if (map_name) libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name)); - attr.numa_node = map_attr->numa_node; - attr.map_ifindex = map_attr->map_ifindex; - attr.max_entries = max_entries; - attr.btf_key_type_id = map_attr->btf_key_type_id; - attr.btf_value_type_id = map_attr->btf_value_type_id; - - pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n", - attr.map_name, map_idx, map_type, attr.btf_value_type_id); + attr.numa_node = tgt_endian(map_attr->numa_node); + attr.map_ifindex = tgt_endian(map_attr->map_ifindex); + attr.max_entries = tgt_endian(max_entries); + attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id); + attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id); map_create_attr = add_data(gen, &attr, attr_size); - if (attr.btf_value_type_id) + pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n", + map_name, map_idx, map_type, map_attr->btf_value_type_id, + map_create_attr, attr_size); + + if (map_attr->btf_value_type_id) /* populate union bpf_attr with btf_fd saved in the stack earlier */ move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4, stack_off(btf_fd)); - switch (attr.map_type) { + switch (map_type) { case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4, @@ -498,8 +532,8 @@ void bpf_gen__map_create(struct bpf_gen *gen, /* emit MAP_CREATE command */ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size); debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d", - attr.map_name, map_idx, map_type, value_size, - attr.btf_value_type_id); + map_name, map_idx, map_type, value_size, + map_attr->btf_value_type_id); emit_check_err(gen); /* remember map_fd in the stack, if successful */ if (map_idx < 0) { @@ -784,12 +818,12 @@ static void emit_relo_ksym_typeless(struct bpf_gen *gen, emit_ksym_relo_log(gen, relo, kdesc->ref); } -static __u32 
src_reg_mask(void) +static __u32 src_reg_mask(struct bpf_gen *gen) { -#if defined(__LITTLE_ENDIAN_BITFIELD) - return 0x0f; /* src_reg,dst_reg,... */ -#elif defined(__BIG_ENDIAN_BITFIELD) - return 0xf0; /* dst_reg,src_reg,... */ +#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */ + return gen->swapped_endian ? 0xf0 : 0x0f; +#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */ + return gen->swapped_endian ? 0x0f : 0xf0; #else #error "Unsupported bit endianness, cannot proceed" #endif @@ -840,7 +874,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3)); clear_src_reg: /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */ - reg_mask = src_reg_mask(); + reg_mask = src_reg_mask(gen); emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code))); emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask)); emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code))); @@ -931,48 +965,94 @@ static void cleanup_relos(struct bpf_gen *gen, int insns) cleanup_core_relo(gen); } +/* Convert func, line, and core relo info blobs to target endianness */ +static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info, + int core_relos, struct bpf_prog_load_opts *load_attr) +{ + struct bpf_func_info *fi = gen->data_start + func_info; + struct bpf_line_info *li = gen->data_start + line_info; + struct bpf_core_relo *cr = gen->data_start + core_relos; + int i; + + for (i = 0; i < load_attr->func_info_cnt; i++) + bpf_func_info_bswap(fi++); + + for (i = 0; i < load_attr->line_info_cnt; i++) + bpf_line_info_bswap(li++); + + for (i = 0; i < gen->core_relo_cnt; i++) + bpf_core_relo_bswap(cr++); +} + void bpf_gen__prog_load(struct bpf_gen *gen, enum bpf_prog_type prog_type, const char *prog_name, const char *license, struct bpf_insn *insns, size_t insn_cnt, struct bpf_prog_load_opts *load_attr, int prog_idx) { + int func_info_tot_sz = load_attr->func_info_cnt * + load_attr->func_info_rec_size; + int line_info_tot_sz = load_attr->line_info_cnt * + load_attr->line_info_rec_size; + int core_relo_tot_sz = gen->core_relo_cnt * + sizeof(struct bpf_core_relo); int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos; int attr_size = offsetofend(union bpf_attr, core_relo_rec_size); union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n", - prog_type, insn_cnt, prog_idx); /* add license string to blob of bytes */ license_off = add_data(gen, license, strlen(license) + 1); /* add insns to blob of bytes */ insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn)); + pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n", + prog_idx, prog_type, insns_off, insn_cnt, license_off); - attr.prog_type = prog_type; - attr.expected_attach_type = load_attr->expected_attach_type; - attr.attach_btf_id = load_attr->attach_btf_id; - attr.prog_ifindex = load_attr->prog_ifindex; + /* convert blob insns to target endianness */ + if (gen->swapped_endian) { + struct bpf_insn *insn = gen->data_start + insns_off; + int i; + + for (i = 0; i < insn_cnt; i++, insn++) + bpf_insn_bswap(insn); + } + + attr.prog_type = tgt_endian(prog_type); + attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type); + attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id); + attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex); 
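To make the sizeof() caveat in the tgt_endian() comment above concrete, here is a small self-contained sketch (host little-endian, target big-endian; the variable names are made up for the illustration)::

  #include <byteswap.h>
  #include <stdint.h>

  void sketch(void)
  {
          uint16_t cnt = 0x0102;  /* 2-byte local feeding a 4-byte attr field */
          uint32_t field;

          /* Swapping at 2 bytes (what tgt_endian(cnt) would do) yields 0x0201;
           * stored as bytes 01 02 00 00, so the big-endian target reads back
           * 0x01020000 - not the intended value.
           */
          field = bswap_16(cnt);

          /* Widening first makes the swap cover the whole field: 0x02010000 is
           * stored as bytes 00 00 01 02, and the target reads back 0x00000102.
           */
          field = bswap_32((uint32_t)cnt);
          (void)field;
  }

This is why explicit widening casts such as tgt_endian((__u32)insn_cnt) appear in the attr assignments below.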
attr.kern_version = 0; - attr.insn_cnt = (__u32)insn_cnt; - attr.prog_flags = load_attr->prog_flags; + attr.insn_cnt = tgt_endian((__u32)insn_cnt); + attr.prog_flags = tgt_endian(load_attr->prog_flags); - attr.func_info_rec_size = load_attr->func_info_rec_size; - attr.func_info_cnt = load_attr->func_info_cnt; - func_info = add_data(gen, load_attr->func_info, - attr.func_info_cnt * attr.func_info_rec_size); + attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size); + attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt); + func_info = add_data(gen, load_attr->func_info, func_info_tot_sz); + pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n", + func_info, load_attr->func_info_cnt, + load_attr->func_info_rec_size); - attr.line_info_rec_size = load_attr->line_info_rec_size; - attr.line_info_cnt = load_attr->line_info_cnt; - line_info = add_data(gen, load_attr->line_info, - attr.line_info_cnt * attr.line_info_rec_size); + attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size); + attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt); + line_info = add_data(gen, load_attr->line_info, line_info_tot_sz); + pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n", + line_info, load_attr->line_info_cnt, + load_attr->line_info_rec_size); - attr.core_relo_rec_size = sizeof(struct bpf_core_relo); - attr.core_relo_cnt = gen->core_relo_cnt; - core_relos = add_data(gen, gen->core_relos, - attr.core_relo_cnt * attr.core_relo_rec_size); + attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo)); + attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt); + core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz); + pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n", + core_relos, gen->core_relo_cnt, + sizeof(struct bpf_core_relo)); + + /* convert all info blobs to target endianness */ + if (gen->swapped_endian) + info_blob_bswap(gen, func_info, line_info, core_relos, load_attr); libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name)); prog_load_attr = add_data(gen, &attr, attr_size); + pr_debug("gen: prog_load: attr: off %d size %d\n", + prog_load_attr, attr_size); /* populate union bpf_attr with a pointer to license */ emit_rel_store(gen, attr_field(prog_load_attr, license), license_off); @@ -1040,7 +1120,6 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, int zero = 0; memset(&attr, 0, attr_size); - pr_debug("gen: map_update_elem: idx %d\n", map_idx); value = add_data(gen, pvalue, value_size); key = add_data(gen, &zero, sizeof(zero)); @@ -1068,6 +1147,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); map_update_attr = add_data(gen, &attr, attr_size); + pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n", + map_idx, value, value_size, map_update_attr, attr_size); move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, blob_fd_array_off(gen, map_idx)); emit_rel_store(gen, attr_field(map_update_attr, key), key); @@ -1084,14 +1165,16 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo int attr_size = offsetofend(union bpf_attr, flags); int map_update_attr, key; union bpf_attr attr; + int tgt_slot; memset(&attr, 0, attr_size); - pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n", - outer_map_idx, slot, inner_map_idx); - key = add_data(gen, &slot, sizeof(slot)); + tgt_slot = tgt_endian(slot); + key = add_data(gen, 
&tgt_slot, sizeof(tgt_slot)); map_update_attr = add_data(gen, &attr, attr_size); + pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n", + outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size); move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, blob_fd_array_off(gen, outer_map_idx)); emit_rel_store(gen, attr_field(map_update_attr, key), key); @@ -1112,8 +1195,9 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx) union bpf_attr attr; memset(&attr, 0, attr_size); - pr_debug("gen: map_freeze: idx %d\n", map_idx); map_freeze_attr = add_data(gen, &attr, attr_size); + pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n", + map_idx, map_freeze_attr, attr_size); move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4, blob_fd_array_off(gen, map_idx)); /* emit MAP_FREEZE command */ diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h index c12f8320e668..0c4f155e8eb7 100644 --- a/tools/lib/bpf/hashmap.h +++ b/tools/lib/bpf/hashmap.h @@ -166,8 +166,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value); * @bkt: integer used as a bucket loop cursor */ #define hashmap__for_each_entry(map, cur, bkt) \ - for (bkt = 0; bkt < map->cap; bkt++) \ - for (cur = map->buckets[bkt]; cur; cur = cur->next) + for (bkt = 0; bkt < (map)->cap; bkt++) \ + for (cur = (map)->buckets[bkt]; cur; cur = cur->next) /* * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe @@ -178,8 +178,8 @@ bool hashmap_find(const struct hashmap *map, long key, long *value); * @bkt: integer used as a bucket loop cursor */ #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ - for (bkt = 0; bkt < map->cap; bkt++) \ - for (cur = map->buckets[bkt]; \ + for (bkt = 0; bkt < (map)->cap; bkt++) \ + for (cur = (map)->buckets[bkt]; \ cur && ({tmp = cur->next; true; }); \ cur = tmp) @@ -190,19 +190,19 @@ bool hashmap_find(const struct hashmap *map, long key, long *value); * @key: key to iterate entries for */ #define hashmap__for_each_key_entry(map, cur, _key) \ - for (cur = map->buckets \ - ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + for (cur = (map)->buckets \ + ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \ : NULL; \ cur; \ cur = cur->next) \ - if (map->equal_fn(cur->key, (_key), map->ctx)) + if ((map)->equal_fn(cur->key, (_key), (map)->ctx)) #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ - for (cur = map->buckets \ - ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \ + for (cur = (map)->buckets \ + ? (map)->buckets[hash_bits((map)->hash_fn((_key), (map)->ctx), (map)->cap_bits)] \ : NULL; \ cur && ({ tmp = cur->next; true; }); \ cur = tmp) \ - if (map->equal_fn(cur->key, (_key), map->ctx)) + if ((map)->equal_fn(cur->key, (_key), (map)->ctx)) #endif /* __LIBBPF_HASHMAP_H */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 219facd0e66e..66173ddb5a2d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -133,6 +133,7 @@ static const char * const attach_type_name[] = { [BPF_NETKIT_PRIMARY] = "netkit_primary", [BPF_NETKIT_PEER] = "netkit_peer", [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session", + [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session", }; static const char * const link_type_name[] = { @@ -694,6 +695,8 @@ struct bpf_object { /* Information when doing ELF related work. 
Only valid if efile.elf is not NULL */ struct elf_state efile; + unsigned char byteorder; + struct btf *btf; struct btf_ext *btf_ext; @@ -940,6 +943,20 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, return 0; } +static void bpf_object_bswap_progs(struct bpf_object *obj) +{ + struct bpf_program *prog = obj->programs; + struct bpf_insn *insn; + int p, i; + + for (p = 0; p < obj->nr_programs; p++, prog++) { + insn = prog->insns; + for (i = 0; i < prog->insns_cnt; i++, insn++) + bpf_insn_bswap(insn); + } + pr_debug("converted %zu BPF programs to native byte order\n", obj->nr_programs); +} + static const struct btf_member * find_member_by_offset(const struct btf_type *t, __u32 bit_offset) { @@ -1506,6 +1523,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj) elf_end(obj->efile.elf); obj->efile.elf = NULL; + obj->efile.ehdr = NULL; obj->efile.symbols = NULL; obj->efile.arena_data = NULL; @@ -1533,11 +1551,8 @@ static int bpf_object__elf_init(struct bpf_object *obj) } else { obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC); if (obj->efile.fd < 0) { - char errmsg[STRERR_BUFSIZE], *cp; - err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("elf: failed to open %s: %s\n", obj->path, cp); + pr_warn("elf: failed to open %s: %s\n", obj->path, errstr(err)); return err; } @@ -1571,6 +1586,16 @@ static int bpf_object__elf_init(struct bpf_object *obj) goto errout; } + /* Validate ELF object endianness... */ + if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB && + ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { + err = -LIBBPF_ERRNO__ENDIAN; + pr_warn("elf: '%s' has unknown byte order\n", obj->path); + goto errout; + } + /* and save after bpf_object_open() frees ELF data */ + obj->byteorder = ehdr->e_ident[EI_DATA]; + if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) { pr_warn("elf: failed to get section names section index for %s: %s\n", obj->path, elf_errmsg(-1)); @@ -1599,19 +1624,15 @@ static int bpf_object__elf_init(struct bpf_object *obj) return err; } -static int bpf_object__check_endianness(struct bpf_object *obj) +static bool is_native_endianness(struct bpf_object *obj) { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB) - return 0; + return obj->byteorder == ELFDATA2LSB; #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB) - return 0; + return obj->byteorder == ELFDATA2MSB; #else # error "Unrecognized __BYTE_ORDER__" #endif - pr_warn("elf: endianness mismatch in %s.\n", obj->path); - return -LIBBPF_ERRNO__ENDIAN; } static int @@ -1937,8 +1958,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, if (map->mmaped == MAP_FAILED) { err = -errno; map->mmaped = NULL; - pr_warn("failed to alloc map '%s' content buffer: %d\n", - map->name, err); + pr_warn("failed to alloc map '%s' content buffer: %s\n", map->name, errstr(err)); zfree(&map->real_name); zfree(&map->name); return err; @@ -2102,7 +2122,7 @@ static int parse_u64(const char *value, __u64 *res) *res = strtoull(value, &value_end, 0); if (errno) { err = -errno; - pr_warn("failed to parse '%s' as integer: %d\n", value, err); + pr_warn("failed to parse '%s': %s\n", value, errstr(err)); return err; } if (*value_end) { @@ -2268,8 +2288,8 @@ static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data) while (gzgets(file, buf, sizeof(buf))) { err = bpf_object__process_kconfig_line(obj, buf, data); if (err) { - pr_warn("error parsing system Kconfig line 
'%s': %d\n", - buf, err); + pr_warn("error parsing system Kconfig line '%s': %s\n", + buf, errstr(err)); goto out; } } @@ -2289,15 +2309,15 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj, file = fmemopen((void *)config, strlen(config), "r"); if (!file) { err = -errno; - pr_warn("failed to open in-memory Kconfig: %d\n", err); + pr_warn("failed to open in-memory Kconfig: %s\n", errstr(err)); return err; } while (fgets(buf, sizeof(buf), file)) { err = bpf_object__process_kconfig_line(obj, buf, data); if (err) { - pr_warn("error parsing in-memory Kconfig line '%s': %d\n", - buf, err); + pr_warn("error parsing in-memory Kconfig line '%s': %s\n", + buf, errstr(err)); break; } } @@ -3212,7 +3232,7 @@ static int bpf_object__init_btf(struct bpf_object *obj, err = libbpf_get_error(obj->btf); if (err) { obj->btf = NULL; - pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err); + pr_warn("Error loading ELF section %s: %s.\n", BTF_ELF_SEC, errstr(err)); goto out; } /* enforce 8-byte pointers for BPF-targeted BTFs */ @@ -3230,8 +3250,8 @@ static int bpf_object__init_btf(struct bpf_object *obj, obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); err = libbpf_get_error(obj->btf_ext); if (err) { - pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n", - BTF_EXT_ELF_SEC, err); + pr_warn("Error loading ELF section %s: %s. Ignored and continue.\n", + BTF_EXT_ELF_SEC, errstr(err)); obj->btf_ext = NULL; goto out; } @@ -3323,8 +3343,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, if (t->size == 0) { err = find_elf_sec_sz(obj, sec_name, &size); if (err || !size) { - pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n", - sec_name, size, err); + pr_debug("sec '%s': failed to determine size from ELF: size %u, err %s\n", + sec_name, size, errstr(err)); return -ENOENT; } @@ -3478,7 +3498,7 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) obj->btf_vmlinux = btf__load_vmlinux_btf(); err = libbpf_get_error(obj->btf_vmlinux); if (err) { - pr_warn("Error loading vmlinux BTF: %d\n", err); + pr_warn("Error loading vmlinux BTF: %s\n", errstr(err)); obj->btf_vmlinux = NULL; return err; } @@ -3581,11 +3601,14 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) report: if (err) { btf_mandatory = kernel_needs_btf(obj); - pr_warn("Error loading .BTF into kernel: %d. %s\n", err, - btf_mandatory ? "BTF is mandatory, can't proceed." - : "BTF is optional, ignoring."); - if (!btf_mandatory) + if (btf_mandatory) { + pr_warn("Error loading .BTF into kernel: %s. BTF is mandatory, can't proceed.\n", + errstr(err)); + } else { + pr_info("Error loading .BTF into kernel: %s. 
BTF is optional, ignoring.\n", + errstr(err)); err = 0; + } } return err; } @@ -3953,6 +3976,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj) return -LIBBPF_ERRNO__FORMAT; } + /* change BPF program insns to native endianness for introspection */ + if (!is_native_endianness(obj)) + bpf_object_bswap_progs(obj); + /* sort BPF programs by section name and in-section instruction offset * for faster search */ @@ -3985,7 +4012,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) return true; /* global function */ - return bind == STB_GLOBAL && type == STT_FUNC; + return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; } static int find_extern_btf_id(const struct btf *btf, const char *ext_name) @@ -4389,7 +4416,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) { - return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; + return prog->sec_idx == obj->efile.text_shndx; } struct bpf_program * @@ -4783,8 +4810,8 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) fp = fopen(file, "re"); if (!fp) { err = -errno; - pr_warn("failed to open %s: %d. No procfs support?\n", file, - err); + pr_warn("failed to open %s: %s. No procfs support?\n", file, + errstr(err)); return err; } @@ -4939,8 +4966,8 @@ static int bpf_object_prepare_token(struct bpf_object *obj) bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR); if (bpffs_fd < 0) { err = -errno; - __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n", - obj->name, err, bpffs_path, + __pr(level, "object '%s': failed (%s) to open BPF FS mount at '%s'%s\n", + obj->name, errstr(err), bpffs_path, mandatory ? "" : ", skipping optional step..."); return mandatory ? err : 0; } @@ -4974,7 +5001,6 @@ static int bpf_object_prepare_token(struct bpf_object *obj) static int bpf_object__probe_loading(struct bpf_object *obj) { - char *cp, errmsg[STRERR_BUFSIZE]; struct bpf_insn insns[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -4990,7 +5016,8 @@ bpf_object__probe_loading(struct bpf_object *obj) ret = bump_rlimit_memlock(); if (ret) - pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret); + pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %s), you might need to do it explicitly!\n", + errstr(ret)); /* make sure basic loading works */ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts); @@ -4998,11 +5025,8 @@ bpf_object__probe_loading(struct bpf_object *obj) ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts); if (ret < 0) { ret = errno; - cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); - pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " - "program. Make sure your kernel supports BPF " - "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " - "set to big enough value.\n", __func__, cp, ret); + pr_warn("Error in %s(): %s. Couldn't load trivial BPF program. 
Make sure your kernel supports BPF (CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is set to big enough value.\n", + __func__, errstr(ret)); return -ret; } close(ret); @@ -5027,7 +5051,6 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) { struct bpf_map_info map_info; - char msg[STRERR_BUFSIZE]; __u32 map_info_len = sizeof(map_info); int err; @@ -5037,7 +5060,7 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); if (err) { pr_warn("failed to get map info for map FD %d: %s\n", map_fd, - libbpf_strerror_r(errno, msg, sizeof(msg))); + errstr(err)); return false; } @@ -5052,7 +5075,6 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) static int bpf_object__reuse_map(struct bpf_map *map) { - char *cp, errmsg[STRERR_BUFSIZE]; int err, pin_fd; pin_fd = bpf_obj_get(map->pin_path); @@ -5064,9 +5086,8 @@ bpf_object__reuse_map(struct bpf_map *map) return 0; } - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); pr_warn("couldn't retrieve pinned map '%s': %s\n", - map->pin_path, cp); + map->pin_path, errstr(err)); return err; } @@ -5092,8 +5113,8 @@ static int bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) { enum libbpf_map_type map_type = map->libbpf_type; - char *cp, errmsg[STRERR_BUFSIZE]; int err, zero = 0; + size_t mmap_sz; if (obj->gen_loader) { bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, @@ -5106,9 +5127,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); if (err) { err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("Error setting initial map(%s) contents: %s\n", - map->name, cp); + pr_warn("map '%s': failed to set initial contents: %s\n", + bpf_map__name(map), errstr(err)); return err; } @@ -5117,12 +5137,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) err = bpf_map_freeze(map->fd); if (err) { err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("Error freezing map(%s) as read-only: %s\n", - map->name, cp); + pr_warn("map '%s': failed to freeze as read-only: %s\n", + bpf_map__name(map), errstr(err)); return err; } } + + /* Remap anonymous mmap()-ed "map initialization image" as + * a BPF map-backed mmap()-ed memory, but preserving the same + * memory address. This will cause kernel to change process' + * page table to point to a different piece of kernel memory, + * but from userspace point of view memory address (and its + * contents, being identical at this point) will stay the + * same. This mapping will be released by bpf_object__close() + * as per normal clean up procedure. 
+ */ + mmap_sz = bpf_map_mmap_sz(map); + if (map->def.map_flags & BPF_F_MMAPABLE) { + void *mmaped; + int prot; + + if (map->def.map_flags & BPF_F_RDONLY_PROG) + prot = PROT_READ; + else + prot = PROT_READ | PROT_WRITE; + mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0); + if (mmaped == MAP_FAILED) { + err = -errno; + pr_warn("map '%s': failed to re-mmap() contents: %s\n", + bpf_map__name(map), errstr(err)); + return err; + } + map->mmaped = mmaped; + } else if (map->mmaped) { + munmap(map->mmaped, mmap_sz); + map->mmaped = NULL; + } + return 0; } @@ -5171,8 +5222,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b return err; err = bpf_object__create_map(obj, map->inner_map, true); if (err) { - pr_warn("map '%s': failed to create inner map: %d\n", - map->name, err); + pr_warn("map '%s': failed to create inner map: %s\n", + map->name, errstr(err)); return err; } map->inner_map_fd = map->inner_map->fd; @@ -5226,12 +5277,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b def->max_entries, &create_attr); } if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { - char *cp, errmsg[STRERR_BUFSIZE]; - err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", - map->name, cp, err); + pr_warn("Error in bpf_create_map_xattr(%s): %s. Retrying without BTF.\n", + map->name, errstr(err)); create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; @@ -5286,8 +5334,8 @@ static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map) } if (err) { err = -errno; - pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", - map->name, i, targ_map->name, fd, err); + pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %s\n", + map->name, i, targ_map->name, fd, errstr(err)); return err; } pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", @@ -5319,8 +5367,8 @@ static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map) err = bpf_map_update_elem(map->fd, &i, &fd, 0); if (err) { err = -errno; - pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n", - map->name, i, targ_prog->name, fd, err); + pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %s\n", + map->name, i, targ_prog->name, fd, errstr(err)); return err; } pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n", @@ -5373,7 +5421,6 @@ static int bpf_object__create_maps(struct bpf_object *obj) { struct bpf_map *map; - char *cp, errmsg[STRERR_BUFSIZE]; unsigned int i, j; int err; bool retried; @@ -5439,8 +5486,7 @@ bpf_object__create_maps(struct bpf_object *obj) err = bpf_object__populate_internal_map(obj, map); if (err < 0) goto err_out; - } - if (map->def.type == BPF_MAP_TYPE_ARENA) { + } else if (map->def.type == BPF_MAP_TYPE_ARENA) { map->mmaped = mmap((void *)(long)map->map_extra, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, map->map_extra ? 
MAP_SHARED | MAP_FIXED : MAP_SHARED, @@ -5448,8 +5494,8 @@ bpf_object__create_maps(struct bpf_object *obj) if (map->mmaped == MAP_FAILED) { err = -errno; map->mmaped = NULL; - pr_warn("map '%s': failed to mmap arena: %d\n", - map->name, err); + pr_warn("map '%s': failed to mmap arena: %s\n", + map->name, errstr(err)); return err; } if (obj->arena_data) { @@ -5471,8 +5517,8 @@ bpf_object__create_maps(struct bpf_object *obj) retried = true; goto retry; } - pr_warn("map '%s': failed to auto-pin at '%s': %d\n", - map->name, map->pin_path, err); + pr_warn("map '%s': failed to auto-pin at '%s': %s\n", + map->name, map->pin_path, errstr(err)); goto err_out; } } @@ -5481,8 +5527,7 @@ bpf_object__create_maps(struct bpf_object *obj) return 0; err_out: - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); + pr_warn("map '%s': failed to create: %s\n", map->name, errstr(err)); pr_perm_msg(err); for (j = 0; j < i; j++) zclose(obj->maps[j].fd); @@ -5606,7 +5651,7 @@ static int load_module_btfs(struct bpf_object *obj) } if (err) { err = -errno; - pr_warn("failed to iterate BTF objects: %d\n", err); + pr_warn("failed to iterate BTF objects: %s\n", errstr(err)); return err; } @@ -5615,7 +5660,7 @@ static int load_module_btfs(struct bpf_object *obj) if (errno == ENOENT) continue; /* expected race: BTF was unloaded */ err = -errno; - pr_warn("failed to get BTF object #%d FD: %d\n", id, err); + pr_warn("failed to get BTF object #%d FD: %s\n", id, errstr(err)); return err; } @@ -5627,7 +5672,7 @@ static int load_module_btfs(struct bpf_object *obj) err = bpf_btf_get_info_by_fd(fd, &info, &len); if (err) { err = -errno; - pr_warn("failed to get BTF object #%d info: %d\n", id, err); + pr_warn("failed to get BTF object #%d info: %s\n", id, errstr(err)); goto err_out; } @@ -5640,8 +5685,8 @@ static int load_module_btfs(struct bpf_object *obj) btf = btf_get_from_fd(fd, obj->btf_vmlinux); err = libbpf_get_error(btf); if (err) { - pr_warn("failed to load module [%s]'s BTF object #%d: %d\n", - name, id, err); + pr_warn("failed to load module [%s]'s BTF object #%d: %s\n", + name, id, errstr(err)); goto err_out; } @@ -5870,7 +5915,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); err = libbpf_get_error(obj->btf_vmlinux_override); if (err) { - pr_warn("failed to parse target BTF: %d\n", err); + pr_warn("failed to parse target BTF: %s\n", errstr(err)); return err; } } @@ -5930,8 +5975,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) err = record_relo_core(prog, rec, insn_idx); if (err) { - pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", - prog->name, i, err); + pr_warn("prog '%s': relo #%d: failed to record relocation: %s\n", + prog->name, i, errstr(err)); goto out; } @@ -5940,15 +5985,15 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); if (err) { - pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", - prog->name, i, err); + pr_warn("prog '%s': relo #%d: failed to relocate: %s\n", + prog->name, i, errstr(err)); goto out; } err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); if (err) { - pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", - prog->name, i, insn_idx, err); + pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %s\n", + prog->name, i, insn_idx, 
errstr(err)); goto out; } } @@ -6216,8 +6261,8 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj, &main_prog->func_info_rec_size); if (err) { if (err != -ENOENT) { - pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", - prog->name, err); + pr_warn("prog '%s': error relocating .BTF.ext function info: %s\n", + prog->name, errstr(err)); return err; } if (main_prog->func_info) { @@ -6244,8 +6289,8 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj, &main_prog->line_info_rec_size); if (err) { if (err != -ENOENT) { - pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n", - prog->name, err); + pr_warn("prog '%s': error relocating .BTF.ext line info: %s\n", + prog->name, errstr(err)); return err; } if (main_prog->line_info) { @@ -7009,8 +7054,8 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat if (obj->btf_ext) { err = bpf_object__relocate_core(obj, targ_btf_path); if (err) { - pr_warn("failed to perform CO-RE relocations: %d\n", - err); + pr_warn("failed to perform CO-RE relocations: %s\n", + errstr(err)); return err; } bpf_object__sort_relos(obj); @@ -7054,8 +7099,8 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat err = bpf_object__relocate_calls(obj, prog); if (err) { - pr_warn("prog '%s': failed to relocate calls: %d\n", - prog->name, err); + pr_warn("prog '%s': failed to relocate calls: %s\n", + prog->name, errstr(err)); return err; } @@ -7091,16 +7136,16 @@ static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_pat /* Process data relos for main programs */ err = bpf_object__relocate_data(obj, prog); if (err) { - pr_warn("prog '%s': failed to relocate data references: %d\n", - prog->name, err); + pr_warn("prog '%s': failed to relocate data references: %s\n", + prog->name, errstr(err)); return err; } /* Fix up .BTF.ext information, if necessary */ err = bpf_program_fixup_func_info(obj, prog); if (err) { - pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n", - prog->name, err); + pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %s\n", + prog->name, errstr(err)); return err; } } @@ -7352,8 +7397,14 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog, opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; /* special check for usdt to use uprobe_multi link */ - if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) + if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK)) { + /* for BPF_TRACE_UPROBE_MULTI, user might want to query expected_attach_type + * in prog, and expected_attach_type we set in kernel is from opts, so we + * update both. 
+ */ prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; + opts->expected_attach_type = BPF_TRACE_UPROBE_MULTI; + } if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { int btf_obj_fd = 0, btf_type_id = 0, err; @@ -7403,7 +7454,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog { LIBBPF_OPTS(bpf_prog_load_opts, load_attr); const char *prog_name = NULL; - char *cp, errmsg[STRERR_BUFSIZE]; size_t log_buf_size = 0; char *log_buf = NULL, *tmp; bool own_log_buf = true; @@ -7443,6 +7493,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog load_attr.attach_btf_id = prog->attach_btf_id; load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; + load_attr.expected_attach_type = prog->expected_attach_type; /* specify func_info/line_info only if kernel supports them */ if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { @@ -7466,17 +7517,14 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); if (err < 0) { - pr_warn("prog '%s': failed to prepare load attributes: %d\n", - prog->name, err); + pr_warn("prog '%s': failed to prepare load attributes: %s\n", + prog->name, errstr(err)); return err; } insns = prog->insns; insns_cnt = prog->insns_cnt; } - /* allow prog_prepare_load_fn to change expected_attach_type */ - load_attr.expected_attach_type = prog->expected_attach_type; - if (obj->gen_loader) { bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, license, insns, insns_cnt, &load_attr, @@ -7534,9 +7582,8 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog continue; if (bpf_prog_bind_map(ret, map->fd, NULL)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("prog '%s': failed to bind map '%s': %s\n", - prog->name, map->real_name, cp); + prog->name, map->real_name, errstr(errno)); /* Don't fail hard if can't bind rodata. */ } } @@ -7566,8 +7613,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog /* post-process verifier log to improve error descriptions */ fixup_verifier_log(prog, log_buf, log_buf_size); - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp); + pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, errstr(errno)); pr_perm_msg(ret); if (own_log_buf && log_buf && log_buf[0] != '\0') { @@ -7860,7 +7906,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt, obj->license, obj->kern_version, &prog->fd); if (err) { - pr_warn("prog '%s': failed to load: %d\n", prog->name, err); + pr_warn("prog '%s': failed to load: %s\n", prog->name, errstr(err)); return err; } } @@ -7894,8 +7940,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object if (prog->sec_def->prog_setup_fn) { err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); if (err < 0) { - pr_warn("prog '%s': failed to initialize: %d\n", - prog->name, err); + pr_warn("prog '%s': failed to initialize: %s\n", + prog->name, errstr(err)); return err; } } @@ -7992,7 +8038,6 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, } err = bpf_object__elf_init(obj); - err = err ? : bpf_object__check_endianness(obj); err = err ? 
: bpf_object__elf_collect(obj); err = err ? : bpf_object__collect_externs(obj); err = err ? : bpf_object_fixup_btf(obj); @@ -8085,7 +8130,7 @@ static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) f = fopen("/proc/kallsyms", "re"); if (!f) { err = -errno; - pr_warn("failed to open /proc/kallsyms: %d\n", err); + pr_warn("failed to open /proc/kallsyms: %s\n", errstr(err)); return err; } @@ -8498,8 +8543,15 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch return libbpf_err(-EINVAL); } - if (obj->gen_loader) + /* Disallow kernel loading programs of non-native endianness but + * permit cross-endian creation of "light skeleton". + */ + if (obj->gen_loader) { bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps); + } else if (!is_native_endianness(obj)) { + pr_warn("object '%s': loading non-native endianness is unsupported\n", obj->name); + return libbpf_err(-LIBBPF_ERRNO__ENDIAN); + } err = bpf_object_prepare_token(obj); err = err ? : bpf_object__probe_loading(obj); @@ -8562,7 +8614,6 @@ int bpf_object__load(struct bpf_object *obj) static int make_parent_dir(const char *path) { - char *cp, errmsg[STRERR_BUFSIZE]; char *dname, *dir; int err = 0; @@ -8576,15 +8627,13 @@ static int make_parent_dir(const char *path) free(dname); if (err) { - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); - pr_warn("failed to mkdir %s: %s\n", path, cp); + pr_warn("failed to mkdir %s: %s\n", path, errstr(err)); } return err; } static int check_path(const char *path) { - char *cp, errmsg[STRERR_BUFSIZE]; struct statfs st_fs; char *dname, *dir; int err = 0; @@ -8598,8 +8647,7 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("failed to statfs %s: %s\n", dir, cp); + pr_warn("failed to statfs %s: %s\n", dir, errstr(errno)); err = -errno; } free(dname); @@ -8614,7 +8662,6 @@ static int check_path(const char *path) int bpf_program__pin(struct bpf_program *prog, const char *path) { - char *cp, errmsg[STRERR_BUFSIZE]; int err; if (prog->fd < 0) { @@ -8632,8 +8679,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) if (bpf_obj_pin(prog->fd, path)) { err = -errno; - cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); - pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp); + pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, errstr(err)); return libbpf_err(err); } @@ -8664,7 +8710,6 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path) int bpf_map__pin(struct bpf_map *map, const char *path) { - char *cp, errmsg[STRERR_BUFSIZE]; int err; if (map == NULL) { @@ -8723,8 +8768,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path) return 0; out_err: - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); - pr_warn("failed to pin map: %s\n", cp); + pr_warn("failed to pin map: %s\n", errstr(err)); return libbpf_err(err); } @@ -9096,6 +9140,7 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) if (!gen) return -ENOMEM; gen->opts = opts; + gen->swapped_endian = !is_native_endianness(obj); obj->gen_loader = gen; return 0; } @@ -9370,8 +9415,10 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session), SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, 
attach_uprobe_multi), + SEC_DEF("uprobe.session+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_NONE, attach_uprobe_multi), SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), + SEC_DEF("uprobe.session.s+", KPROBE, BPF_TRACE_UPROBE_SESSION, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), @@ -9910,8 +9957,8 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) memset(&info, 0, info_len); err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len); if (err) { - pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n", - attach_prog_fd, err); + pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %s\n", + attach_prog_fd, errstr(err)); return err; } @@ -9923,7 +9970,7 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) btf = btf__load_from_kernel_by_id(info.btf_id); err = libbpf_get_error(btf); if (err) { - pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err); + pr_warn("Failed to get BTF %d of the program: %s\n", info.btf_id, errstr(err)); goto out; } err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); @@ -10005,8 +10052,8 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac } err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd); if (err < 0) { - pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n", - prog->name, attach_prog_fd, attach_name, err); + pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %s\n", + prog->name, attach_prog_fd, attach_name, errstr(err)); return err; } *btf_obj_fd = 0; @@ -10025,8 +10072,8 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac btf_type_id); } if (err) { - pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", - prog->name, attach_name, err); + pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n", + prog->name, attach_name, errstr(err)); return err; } return 0; @@ -10254,14 +10301,14 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size) mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries); err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz); if (err) { - pr_warn("map '%s': failed to resize memory-mapped region: %d\n", - bpf_map__name(map), err); + pr_warn("map '%s': failed to resize memory-mapped region: %s\n", + bpf_map__name(map), errstr(err)); return err; } err = map_btf_datasec_resize(map, size); if (err && err != -ENOENT) { - pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n", - bpf_map__name(map), err); + pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %s\n", + bpf_map__name(map), errstr(err)); map->btf_value_type_id = 0; map->btf_key_type_id = 0; } @@ -10752,7 +10799,6 @@ static void bpf_link_perf_dealloc(struct bpf_link *link) struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, const struct bpf_perf_event_opts *opts) { - char errmsg[STRERR_BUFSIZE]; struct bpf_link_perf *link; int prog_fd, link_fd = -1, err; bool force_ioctl_attach; @@ -10787,9 +10833,8 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts); if 
(link_fd < 0) { err = -errno; - pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n", - prog->name, pfd, - err, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %s\n", + prog->name, pfd, errstr(err)); goto err_out; } link->link.fd = link_fd; @@ -10803,7 +10848,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { err = -errno; pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n", - prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + prog->name, pfd, errstr(err)); if (err == -EPROTO) pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", prog->name, pfd); @@ -10814,7 +10859,7 @@ struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *p if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", - prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + prog->name, pfd, errstr(err)); goto err_out; } @@ -10838,22 +10883,19 @@ struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, */ static int parse_uint_from_file(const char *file, const char *fmt) { - char buf[STRERR_BUFSIZE]; int err, ret; FILE *f; f = fopen(file, "re"); if (!f) { err = -errno; - pr_debug("failed to open '%s': %s\n", file, - libbpf_strerror_r(err, buf, sizeof(buf))); + pr_debug("failed to open '%s': %s\n", file, errstr(err)); return err; } err = fscanf(f, fmt, &ret); if (err != 1) { err = err == EOF ? -EIO : -errno; - pr_debug("failed to parse '%s': %s\n", file, - libbpf_strerror_r(err, buf, sizeof(buf))); + pr_debug("failed to parse '%s': %s\n", file, errstr(err)); fclose(f); return err; } @@ -10897,7 +10939,6 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; - char errmsg[STRERR_BUFSIZE]; int type, pfd; if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS)) @@ -10910,7 +10951,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, if (type < 0) { pr_warn("failed to determine %s perf type: %s\n", uprobe ? "uprobe" : "kprobe", - libbpf_strerror_r(type, errmsg, sizeof(errmsg))); + errstr(type)); return type; } if (retprobe) { @@ -10920,7 +10961,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, if (bit < 0) { pr_warn("failed to determine %s retprobe bit: %s\n", uprobe ? 
"uprobe" : "kprobe", - libbpf_strerror_r(bit, errmsg, sizeof(errmsg))); + errstr(bit)); return bit; } attr.config |= 1 << bit; @@ -11049,14 +11090,13 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; - char errmsg[STRERR_BUFSIZE]; int type, pfd, err; err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); if (err < 0) { pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", kfunc_name, offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); return err; } type = determine_kprobe_perf_type_legacy(probe_name, retprobe); @@ -11064,7 +11104,7 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, err = type; pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", kfunc_name, offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_clean_legacy; } @@ -11080,7 +11120,7 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, if (pfd < 0) { err = -errno; pr_warn("legacy kprobe perf_event_open() failed: %s\n", - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_clean_legacy; } return pfd; @@ -11156,7 +11196,6 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); enum probe_attach_mode attach_mode; - char errmsg[STRERR_BUFSIZE]; char *legacy_probe = NULL; struct bpf_link *link; size_t offset; @@ -11214,7 +11253,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", prog->name, retprobe ? "kretprobe" : "kprobe", func_name, offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_out; } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); @@ -11224,7 +11263,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", prog->name, retprobe ? 
"kretprobe" : "kprobe", func_name, offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_clean_legacy; } if (legacy) { @@ -11360,7 +11399,7 @@ static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res) f = fopen(available_functions_file, "re"); if (!f) { err = -errno; - pr_warn("failed to open %s: %d\n", available_functions_file, err); + pr_warn("failed to open %s: %s\n", available_functions_file, errstr(err)); return err; } @@ -11435,7 +11474,7 @@ static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res) f = fopen(available_path, "re"); if (!f) { err = -errno; - pr_warn("failed to open %s: %d\n", available_path, err); + pr_warn("failed to open %s: %s\n", available_path, errstr(err)); return err; } @@ -11481,7 +11520,6 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, }; enum bpf_attach_type attach_type; struct bpf_link *link = NULL; - char errmsg[STRERR_BUFSIZE]; const unsigned long *addrs; int err, link_fd, prog_fd; bool retprobe, session; @@ -11549,7 +11587,7 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, if (link_fd < 0) { err = -errno; pr_warn("prog '%s': failed to attach: %s\n", - prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + prog->name, errstr(err)); goto error; } link->fd = link_fd; @@ -11693,7 +11731,9 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru ret = 0; break; case 3: + opts.session = str_has_pfx(probe_type, "uprobe.session"); opts.retprobe = str_has_pfx(probe_type, "uretprobe.multi"); + *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); ret = libbpf_get_error(*link); break; @@ -11756,15 +11796,15 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); if (err < 0) { - pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", - binary_path, (size_t)offset, err); + pr_warn("failed to add legacy uprobe event for %s:0x%zx: %s\n", + binary_path, (size_t)offset, errstr(err)); return err; } type = determine_uprobe_perf_type_legacy(probe_name, retprobe); if (type < 0) { err = type; - pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", - binary_path, offset, err); + pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %s\n", + binary_path, offset, errstr(err)); goto err_clean_legacy; } @@ -11779,7 +11819,7 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); if (pfd < 0) { err = -errno; - pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); + pr_warn("legacy uprobe perf_event_open() failed: %s\n", errstr(err)); goto err_clean_legacy; } return pfd; @@ -11942,10 +11982,11 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog, const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; LIBBPF_OPTS(bpf_link_create_opts, lopts); unsigned long *resolved_offsets = NULL; + enum bpf_attach_type attach_type; int err = 0, link_fd, prog_fd; struct bpf_link *link = NULL; - char errmsg[STRERR_BUFSIZE]; char full_path[PATH_MAX]; + bool retprobe, session; const __u64 *cookies; const char **syms; size_t cnt; @@ -11965,6 +12006,8 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog, ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); cookies = OPTS_GET(opts, cookies, NULL); cnt = OPTS_GET(opts, cnt, 0); + retprobe = OPTS_GET(opts, retprobe, false); + 
session = OPTS_GET(opts, session, false); /* * User can specify 2 mutually exclusive set of inputs: @@ -11993,12 +12036,15 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog, return libbpf_err_ptr(-EINVAL); } + if (retprobe && session) + return libbpf_err_ptr(-EINVAL); + if (func_pattern) { if (!strchr(path, '/')) { err = resolve_full_path(path, full_path, sizeof(full_path)); if (err) { - pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", - prog->name, path, err); + pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", + prog->name, path, errstr(err)); return libbpf_err_ptr(err); } path = full_path; @@ -12016,12 +12062,14 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog, offsets = resolved_offsets; } + attach_type = session ? BPF_TRACE_UPROBE_SESSION : BPF_TRACE_UPROBE_MULTI; + lopts.uprobe_multi.path = path; lopts.uprobe_multi.offsets = offsets; lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; lopts.uprobe_multi.cookies = cookies; lopts.uprobe_multi.cnt = cnt; - lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0; + lopts.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0; if (pid == 0) pid = getpid(); @@ -12035,11 +12083,11 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog, } link->detach = &bpf_link__detach_fd; - link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); + link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts); if (link_fd < 0) { err = -errno; pr_warn("prog '%s': failed to attach multi-uprobe: %s\n", - prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + prog->name, errstr(err)); goto error; } link->fd = link_fd; @@ -12058,7 +12106,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const struct bpf_uprobe_opts *opts) { const char *archive_path = NULL, *archive_sep = NULL; - char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL; + char *legacy_probe = NULL; DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); enum probe_attach_mode attach_mode; char full_path[PATH_MAX]; @@ -12090,8 +12138,8 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, } else if (!strchr(binary_path, '/')) { err = resolve_full_path(binary_path, full_path, sizeof(full_path)); if (err) { - pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", - prog->name, binary_path, err); + pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", + prog->name, binary_path, errstr(err)); return libbpf_err_ptr(err); } binary_path = full_path; @@ -12157,7 +12205,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n", prog->name, retprobe ? "uretprobe" : "uprobe", binary_path, func_offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_out; } @@ -12168,7 +12216,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n", prog->name, retprobe ? 
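User-space side of the new mode, as a hedged sketch: one program attached to several symbols with .session = true, which makes libbpf create a BPF_TRACE_UPROBE_SESSION link. The binary path and symbol names are placeholders; session and retprobe are mutually exclusive, as enforced by the -EINVAL check above.

#include <bpf/libbpf.h>

static int attach_alloc_session(struct bpf_program *prog)
{
	const char *syms[] = { "malloc", "free" };
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.syms = syms,
		.cnt = 2,
		.session = true,	/* entry + return handled by one program */
	);
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_multi(prog, -1 /* any process */,
						"/usr/lib/libc.so.6",
						NULL /* no pattern, using syms */,
						&opts);
	return libbpf_get_error(link);
}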
"uretprobe" : "uprobe", binary_path, func_offset, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); goto err_clean_legacy; } if (legacy) { @@ -12289,8 +12337,8 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog, if (!strchr(binary_path, '/')) { err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path)); if (err) { - pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", - prog->name, binary_path, err); + pr_warn("prog '%s': failed to resolve full path for '%s': %s\n", + prog->name, binary_path, errstr(err)); return libbpf_err_ptr(err); } binary_path = resolved_path; @@ -12368,14 +12416,13 @@ static int perf_event_open_tracepoint(const char *tp_category, { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; - char errmsg[STRERR_BUFSIZE]; int tp_id, pfd, err; tp_id = determine_tracepoint_id(tp_category, tp_name); if (tp_id < 0) { pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", tp_category, tp_name, - libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); + errstr(tp_id)); return tp_id; } @@ -12390,7 +12437,7 @@ static int perf_event_open_tracepoint(const char *tp_category, err = -errno; pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", tp_category, tp_name, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); return err; } return pfd; @@ -12402,7 +12449,6 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p const struct bpf_tracepoint_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); - char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int pfd, err; @@ -12415,7 +12461,7 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p if (pfd < 0) { pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", prog->name, tp_category, tp_name, - libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + errstr(pfd)); return libbpf_err_ptr(pfd); } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); @@ -12424,7 +12470,7 @@ struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *p close(pfd); pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", prog->name, tp_category, tp_name, - libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + errstr(err)); return libbpf_err_ptr(err); } return link; @@ -12475,7 +12521,6 @@ bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, struct bpf_raw_tracepoint_opts *opts) { LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts); - char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, pfd; @@ -12500,7 +12545,7 @@ bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog, pfd = -errno; free(link); pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", - prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + prog->name, tp_name, errstr(pfd)); return libbpf_err_ptr(pfd); } link->fd = pfd; @@ -12559,7 +12604,6 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro const struct bpf_trace_opts *opts) { LIBBPF_OPTS(bpf_link_create_opts, link_opts); - char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, pfd; @@ -12584,7 +12628,7 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro pfd = -errno; free(link); pr_warn("prog '%s': failed to attach: %s\n", - prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + prog->name, errstr(pfd)); return libbpf_err_ptr(pfd); } link->fd = pfd; @@ 
-12625,7 +12669,6 @@ bpf_program_attach_fd(const struct bpf_program *prog, const struct bpf_link_create_opts *opts) { enum bpf_attach_type attach_type; - char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; @@ -12647,7 +12690,7 @@ bpf_program_attach_fd(const struct bpf_program *prog, free(link); pr_warn("prog '%s': failed to attach to %s: %s\n", prog->name, target_name, - libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + errstr(link_fd)); return libbpf_err_ptr(link_fd); } link->fd = link_fd; @@ -12760,7 +12803,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, } if (prog->type != BPF_PROG_TYPE_EXT) { - pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace", + pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n", prog->name); return libbpf_err_ptr(-EINVAL); } @@ -12789,7 +12832,6 @@ bpf_program__attach_iter(const struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); - char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; __u32 target_fd = 0; @@ -12817,7 +12859,7 @@ bpf_program__attach_iter(const struct bpf_program *prog, link_fd = -errno; free(link); pr_warn("prog '%s': failed to attach to iterator: %s\n", - prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + prog->name, errstr(link_fd)); return libbpf_err_ptr(link_fd); } link->fd = link_fd; @@ -12859,12 +12901,10 @@ struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog, link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts); if (link_fd < 0) { - char errmsg[STRERR_BUFSIZE]; - link_fd = -errno; free(link); pr_warn("prog '%s': failed to attach to netfilter: %s\n", - prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); + prog->name, errstr(link_fd)); return libbpf_err_ptr(link_fd); } link->fd = link_fd; @@ -13149,7 +13189,6 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, int cpu, int map_key) { struct perf_cpu_buf *cpu_buf; - char msg[STRERR_BUFSIZE]; int err; cpu_buf = calloc(1, sizeof(*cpu_buf)); @@ -13165,7 +13204,7 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, if (cpu_buf->fd < 0) { err = -errno; pr_warn("failed to open perf buffer event on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + cpu, errstr(err)); goto error; } @@ -13176,14 +13215,14 @@ perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr, cpu_buf->base = NULL; err = -errno; pr_warn("failed to mmap perf buffer on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + cpu, errstr(err)); goto error; } if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { err = -errno; pr_warn("failed to enable perf buffer event on cpu #%d: %s\n", - cpu, libbpf_strerror_r(err, msg, sizeof(msg))); + cpu, errstr(err)); goto error; } @@ -13259,7 +13298,6 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, { const char *online_cpus_file = "/sys/devices/system/cpu/online"; struct bpf_map_info map; - char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; bool *online = NULL; __u32 map_info_len; @@ -13282,7 +13320,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, */ if (err != -EINVAL) { pr_warn("failed to get map info for map FD %d: %s\n", - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + map_fd, errstr(err)); return ERR_PTR(err); } pr_debug("failed to get map info for FD %d; API not 
supported? Ignoring...\n", @@ -13312,7 +13350,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, if (pb->epoll_fd < 0) { err = -errno; pr_warn("failed to create epoll instance: %s\n", - libbpf_strerror_r(err, msg, sizeof(msg))); + errstr(err)); goto error; } @@ -13343,7 +13381,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = parse_cpu_mask_file(online_cpus_file, &online, &n); if (err) { - pr_warn("failed to get online CPU mask: %d\n", err); + pr_warn("failed to get online CPU mask: %s\n", errstr(err)); goto error; } @@ -13374,7 +13412,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = -errno; pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", cpu, map_key, cpu_buf->fd, - libbpf_strerror_r(err, msg, sizeof(msg))); + errstr(err)); goto error; } @@ -13385,7 +13423,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = -errno; pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n", cpu, cpu_buf->fd, - libbpf_strerror_r(err, msg, sizeof(msg))); + errstr(err)); goto error; } j++; @@ -13480,7 +13518,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) err = perf_buffer__process_records(pb, cpu_buf); if (err) { - pr_warn("error while processing records: %d\n", err); + pr_warn("error while processing records: %s\n", errstr(err)); return libbpf_err(err); } } @@ -13564,7 +13602,8 @@ int perf_buffer__consume(struct perf_buffer *pb) err = perf_buffer__process_records(pb, cpu_buf); if (err) { - pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); + pr_warn("perf_buffer: failed to process records in buffer #%d: %s\n", + i, errstr(err)); return libbpf_err(err); } } @@ -13675,14 +13714,14 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz) fd = open(fcpu, O_RDONLY | O_CLOEXEC); if (fd < 0) { err = -errno; - pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err); + pr_warn("Failed to open cpu mask file %s: %s\n", fcpu, errstr(err)); return err; } len = read(fd, buf, sizeof(buf)); close(fd); if (len <= 0) { err = len ? 
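For reference, a typical consumer of the perf_buffer API whose setup and poll error paths are converted above; the handler, page count and timeout are arbitrary:

#include <errno.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one record */
}

static int poll_samples(int perf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_map_fd, 8 /* pages per CPU */,
			      on_sample, NULL /* no lost callback */, NULL, NULL);
	err = libbpf_get_error(pb);
	if (err)
		return err;	/* failure is already logged, now via errstr() */

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		; /* keep polling until an error or a signal */

	perf_buffer__free(pb);
	return err == -EINTR ? 0 : err;
}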
-errno : -EINVAL; - pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err); + pr_warn("Failed to read cpu mask from %s: %s\n", fcpu, errstr(err)); return err; } if (len >= sizeof(buf)) { @@ -13774,20 +13813,21 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); if (IS_ERR(obj)) { err = PTR_ERR(obj); - pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); + pr_warn("failed to initialize skeleton BPF object '%s': %s\n", + s->name, errstr(err)); return libbpf_err(err); } *s->obj = obj; err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { - pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); + pr_warn("failed to populate skeleton maps for '%s': %s\n", s->name, errstr(err)); return libbpf_err(err); } err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { - pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); + pr_warn("failed to populate skeleton progs for '%s': %s\n", s->name, errstr(err)); return libbpf_err(err); } @@ -13817,13 +13857,13 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { - pr_warn("failed to populate subskeleton maps: %d\n", err); + pr_warn("failed to populate subskeleton maps: %s\n", errstr(err)); return libbpf_err(err); } err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { - pr_warn("failed to populate subskeleton maps: %d\n", err); + pr_warn("failed to populate subskeleton maps: %s\n", errstr(err)); return libbpf_err(err); } @@ -13834,7 +13874,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) map_type = btf__type_by_id(btf, map_type_id); if (!btf_is_datasec(map_type)) { - pr_warn("type for map '%1$s' is not a datasec: %2$s", + pr_warn("type for map '%1$s' is not a datasec: %2$s\n", bpf_map__name(map), __btf_kind_str(btf_kind(map_type))); return libbpf_err(-EINVAL); @@ -13870,53 +13910,18 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s) err = bpf_object__load(*s->obj); if (err) { - pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); + pr_warn("failed to load BPF skeleton '%s': %s\n", s->name, errstr(err)); return libbpf_err(err); } for (i = 0; i < s->map_cnt; i++) { struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; struct bpf_map *map = *map_skel->map; - size_t mmap_sz = bpf_map_mmap_sz(map); - int prot, map_fd = map->fd; - void **mmaped = map_skel->mmaped; - if (!mmaped) + if (!map_skel->mmaped) continue; - if (!(map->def.map_flags & BPF_F_MMAPABLE)) { - *mmaped = NULL; - continue; - } - - if (map->def.type == BPF_MAP_TYPE_ARENA) { - *mmaped = map->mmaped; - continue; - } - - if (map->def.map_flags & BPF_F_RDONLY_PROG) - prot = PROT_READ; - else - prot = PROT_READ | PROT_WRITE; - - /* Remap anonymous mmap()-ed "map initialization image" as - * a BPF map-backed mmap()-ed memory, but preserving the same - * memory address. This will cause kernel to change process' - * page table to point to a different piece of kernel memory, - * but from userspace point of view memory address (and its - * contents, being identical at this point) will stay the - * same. This mapping will be released by bpf_object__close() - * as per normal clean up procedure, so we don't need to worry - * about it from skeleton's clean up perspective. 
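These skeleton helpers are normally reached through bpftool-generated wrappers rather than called directly; a hedged sketch with a hypothetical "minimal" skeleton shows the usual lifecycle that exercises the open/load paths above:

#include <errno.h>
#include "minimal.skel.h"	/* hypothetical, generated by bpftool gen skeleton */

static int run_minimal(void)
{
	struct minimal *skel;
	int err;

	skel = minimal__open_and_load();	/* bpf_object__open_skeleton() + load */
	if (!skel)
		return -errno;

	err = minimal__attach(skel);
	/* ... read skel->bss / skel->maps while attached ... */
	minimal__destroy(skel);
	return err;
}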
- */ - *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0); - if (*mmaped == MAP_FAILED) { - err = -errno; - *mmaped = NULL; - pr_warn("failed to re-mmap() map '%s': %d\n", - bpf_map__name(map), err); - return libbpf_err(err); - } + *map_skel->mmaped = map->mmaped; } return 0; @@ -13944,8 +13949,8 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); if (err) { - pr_warn("prog '%s': failed to auto-attach: %d\n", - bpf_program__name(prog), err); + pr_warn("prog '%s': failed to auto-attach: %s\n", + bpf_program__name(prog), errstr(err)); return libbpf_err(err); } @@ -13988,7 +13993,8 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) *link = bpf_map__attach_struct_ops(map); if (!*link) { err = -errno; - pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); + pr_warn("map '%s': failed to auto-attach: %s\n", + bpf_map__name(map), errstr(err)); return libbpf_err(err); } } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 91484303849c..b2ce3a72b11d 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -577,10 +577,12 @@ struct bpf_uprobe_multi_opts { size_t cnt; /* create return uprobes */ bool retprobe; + /* create session kprobes */ + bool session; size_t :0; }; -#define bpf_uprobe_multi_opts__last_field retprobe +#define bpf_uprobe_multi_opts__last_field session /** * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 0096e483f7eb..54b6f312cfa8 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -421,6 +421,8 @@ LIBBPF_1.5.0 { global: btf__distill_base; btf__relocate; + btf_ext__endianness; + btf_ext__set_endianness; bpf_map__autoattach; bpf_map__set_autoattach; bpf_object__token_fd; @@ -428,3 +430,6 @@ LIBBPF_1.5.0 { ring__consume_n; ring_buffer__consume_n; } LIBBPF_1.4.0; + +LIBBPF_1.6.0 { +} LIBBPF_1.5.0; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 408df59e0771..de498e2dd6b0 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -10,6 +10,7 @@ #define __LIBBPF_LIBBPF_INTERNAL_H #include +#include #include #include #include @@ -448,11 +449,11 @@ struct btf_ext_info { * * The func_info subsection layout: * record size for struct bpf_func_info in the func_info subsection - * struct btf_sec_func_info for section #1 + * struct btf_ext_info_sec for section #1 * a list of bpf_func_info records for section #1 * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h * but may not be identical - * struct btf_sec_func_info for section #2 + * struct btf_ext_info_sec for section #2 * a list of bpf_func_info records for section #2 * ...... 
* @@ -484,6 +485,8 @@ struct btf_ext { struct btf_ext_header *hdr; void *data; }; + void *data_swapped; + bool swapped_endian; struct btf_ext_info func_info; struct btf_ext_info line_info; struct btf_ext_info core_relo_info; @@ -511,6 +514,32 @@ struct bpf_line_info_min { __u32 line_col; }; +/* Functions to byte-swap info records */ + +typedef void (*info_rec_bswap_fn)(void *); + +static inline void bpf_func_info_bswap(struct bpf_func_info *i) +{ + i->insn_off = bswap_32(i->insn_off); + i->type_id = bswap_32(i->type_id); +} + +static inline void bpf_line_info_bswap(struct bpf_line_info *i) +{ + i->insn_off = bswap_32(i->insn_off); + i->file_name_off = bswap_32(i->file_name_off); + i->line_off = bswap_32(i->line_off); + i->line_col = bswap_32(i->line_col); +} + +static inline void bpf_core_relo_bswap(struct bpf_core_relo *i) +{ + i->insn_off = bswap_32(i->insn_off); + i->type_id = bswap_32(i->type_id); + i->access_str_off = bswap_32(i->access_str_off); + i->kind = bswap_32(i->kind); +} + enum btf_field_iter_kind { BTF_FIELD_ITER_IDS, BTF_FIELD_ITER_STRS, @@ -588,6 +617,16 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn) return insn->code == (BPF_LD | BPF_IMM | BPF_DW); } +static inline void bpf_insn_bswap(struct bpf_insn *insn) +{ + __u8 tmp_reg = insn->dst_reg; + + insn->dst_reg = insn->src_reg; + insn->src_reg = tmp_reg; + insn->off = bswap_16(insn->off); + insn->imm = bswap_32(insn->imm); +} + /* Unconditionally dup FD, ensuring it doesn't use [0, 2] range. * Original FD is not closed or altered in any other way. * Preserves original FD value, if it's invalid (negative). diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h index d6e5eff967cb..28c58fb17250 100644 --- a/tools/lib/bpf/libbpf_version.h +++ b/tools/lib/bpf/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 1 -#define LIBBPF_MINOR_VERSION 5 +#define LIBBPF_MINOR_VERSION 6 #endif /* __LIBBPF_VERSION_H */ diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index e0005c6ade88..cf71d149fe26 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -20,6 +20,7 @@ #include "btf.h" #include "libbpf_internal.h" #include "strset.h" +#include "str_error.h" #define BTF_EXTERN_SEC ".extern" @@ -135,6 +136,7 @@ struct bpf_linker { int fd; Elf *elf; Elf64_Ehdr *elf_hdr; + bool swapped_endian; /* Output sections metadata */ struct dst_sec *secs; @@ -305,7 +307,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644); if (linker->fd < 0) { err = -errno; - pr_warn("failed to create '%s': %d\n", file, err); + pr_warn("failed to create '%s': %s\n", file, errstr(err)); return err; } @@ -324,13 +326,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) linker->elf_hdr->e_machine = EM_BPF; linker->elf_hdr->e_type = ET_REL; -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB; -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB; -#else -#error "Unknown __BYTE_ORDER__" -#endif + /* Set unknown ELF endianness, assign later from input files */ + linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE; /* STRTAB */ /* initialize strset with an empty string to conform to ELF */ @@ -396,6 +393,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) pr_warn_elf("failed to create SYMTAB data"); return -EINVAL; } + /* Ensure libelf translates byte-order 
of symbol records */ + sec->data->d_type = ELF_T_SYM; str_off = strset__add_str(linker->strtab_strs, sec->sec_name); if (str_off < 0) @@ -539,19 +538,21 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, const struct bpf_linker_file_opts *opts, struct src_obj *obj) { -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - const int host_endianness = ELFDATA2LSB; -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - const int host_endianness = ELFDATA2MSB; -#else -#error "Unknown __BYTE_ORDER__" -#endif int err = 0; Elf_Scn *scn; Elf_Data *data; Elf64_Ehdr *ehdr; Elf64_Shdr *shdr; struct src_sec *sec; + unsigned char obj_byteorder; + unsigned char link_byteorder = linker->elf_hdr->e_ident[EI_DATA]; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + const unsigned char host_byteorder = ELFDATA2LSB; +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + const unsigned char host_byteorder = ELFDATA2MSB; +#else +#error "Unknown __BYTE_ORDER__" +#endif pr_debug("linker: adding object file '%s'...\n", filename); @@ -560,7 +561,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, obj->fd = open(filename, O_RDONLY | O_CLOEXEC); if (obj->fd < 0) { err = -errno; - pr_warn("failed to open file '%s': %d\n", filename, err); + pr_warn("failed to open file '%s': %s\n", filename, errstr(err)); return err; } obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); @@ -577,11 +578,25 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, pr_warn_elf("failed to get ELF header for %s", filename); return err; } - if (ehdr->e_ident[EI_DATA] != host_endianness) { + + /* Linker output endianness set by first input object */ + obj_byteorder = ehdr->e_ident[EI_DATA]; + if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) { err = -EOPNOTSUPP; - pr_warn_elf("unsupported byte order of ELF file %s", filename); + pr_warn("unknown byte order of ELF file %s\n", filename); return err; } + if (link_byteorder == ELFDATANONE) { + linker->elf_hdr->e_ident[EI_DATA] = obj_byteorder; + linker->swapped_endian = obj_byteorder != host_byteorder; + pr_debug("linker: set %s-endian output byte order\n", + obj_byteorder == ELFDATA2MSB ? 
"big" : "little"); + } else if (link_byteorder != obj_byteorder) { + err = -EOPNOTSUPP; + pr_warn("byte order mismatch with ELF file %s\n", filename); + return err; + } + if (ehdr->e_type != ET_REL || ehdr->e_machine != EM_BPF || ehdr->e_ident[EI_CLASS] != ELFCLASS64) { @@ -656,7 +671,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, obj->btf = btf__new(data->d_buf, shdr->sh_size); err = libbpf_get_error(obj->btf); if (err) { - pr_warn("failed to parse .BTF from %s: %d\n", filename, err); + pr_warn("failed to parse .BTF from %s: %s\n", + filename, errstr(err)); return err; } sec->skipped = true; @@ -666,7 +682,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size); err = libbpf_get_error(obj->btf_ext); if (err) { - pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err); + pr_warn("failed to parse .BTF.ext from '%s': %s\n", + filename, errstr(err)); return err; } sec->skipped = true; @@ -1109,6 +1126,24 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec return true; } +static bool is_exec_sec(struct dst_sec *sec) +{ + if (!sec || sec->ephemeral) + return false; + return (sec->shdr->sh_type == SHT_PROGBITS) && + (sec->shdr->sh_flags & SHF_EXECINSTR); +} + +static void exec_sec_bswap(void *raw_data, int size) +{ + const int insn_cnt = size / sizeof(struct bpf_insn); + struct bpf_insn *insn = raw_data; + int i; + + for (i = 0; i < insn_cnt; i++, insn++) + bpf_insn_bswap(insn); +} + static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src) { void *tmp; @@ -1168,6 +1203,10 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz); /* now copy src data at a properly aligned offset */ memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size); + + /* convert added bpf insns to native byte-order */ + if (linker->swapped_endian && is_exec_sec(dst)) + exec_sec_bswap(dst->raw_data + dst_align_sz, src->shdr->sh_size); } dst->sec_sz = dst_final_sz; @@ -2415,6 +2454,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) if (glob_sym && glob_sym->var_idx >= 0) { __s64 sz; + /* FUNCs don't have size, nothing to update */ + if (btf_is_func(t)) + continue; + dst_var = &dst_sec->sec_vars[glob_sym->var_idx]; /* Because underlying BTF type might have * changed, so might its size have changed, so @@ -2628,6 +2671,10 @@ int bpf_linker__finalize(struct bpf_linker *linker) if (!sec->scn) continue; + /* restore sections with bpf insns to target byte-order */ + if (linker->swapped_endian && is_exec_sec(sec)) + exec_sec_bswap(sec->raw_data, sec->sec_sz); + sec->data->d_buf = sec->raw_data; } @@ -2696,6 +2743,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name, static int finalize_btf(struct bpf_linker *linker) { + enum btf_endianness link_endianness; LIBBPF_OPTS(btf_dedup_opts, opts); struct btf *btf = linker->btf; const void *raw_data; @@ -2729,17 +2777,24 @@ static int finalize_btf(struct bpf_linker *linker) err = finalize_btf_ext(linker); if (err) { - pr_warn(".BTF.ext generation failed: %d\n", err); + pr_warn(".BTF.ext generation failed: %s\n", errstr(err)); return err; } opts.btf_ext = linker->btf_ext; err = btf__dedup(linker->btf, &opts); if (err) { - pr_warn("BTF dedup failed: %d\n", err); + pr_warn("BTF dedup failed: %s\n", errstr(err)); return err; 
} + /* Set .BTF and .BTF.ext output byte order */ + link_endianness = linker->elf_hdr->e_ident[EI_DATA] == ELFDATA2MSB ? + BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN; + btf__set_endianness(linker->btf, link_endianness); + if (linker->btf_ext) + btf_ext__set_endianness(linker->btf_ext, link_endianness); + /* Emit .BTF section */ raw_data = btf__raw_data(linker->btf, &raw_sz); if (!raw_data) @@ -2747,7 +2802,7 @@ static int finalize_btf(struct bpf_linker *linker) err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz); if (err) { - pr_warn("failed to write out .BTF ELF section: %d\n", err); + pr_warn("failed to write out .BTF ELF section: %s\n", errstr(err)); return err; } @@ -2759,7 +2814,7 @@ static int finalize_btf(struct bpf_linker *linker) err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz); if (err) { - pr_warn("failed to write out .BTF.ext ELF section: %d\n", err); + pr_warn("failed to write out .BTF.ext ELF section: %s\n", errstr(err)); return err; } } @@ -2935,7 +2990,7 @@ static int finalize_btf_ext(struct bpf_linker *linker) err = libbpf_get_error(linker->btf_ext); if (err) { linker->btf_ext = NULL; - pr_warn("failed to parse final .BTF.ext data: %d\n", err); + pr_warn("failed to parse final .BTF.ext data: %s\n", errstr(err)); goto out; } diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index 63a4d5ad12d1..7632e9d41827 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -1339,7 +1339,7 @@ int bpf_core_calc_relo_insn(const char *prog_name, cands->cands[i].id, cand_spec); if (err < 0) { bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec); - pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ", + pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n", prog_name, relo_idx, i, spec_buf, err); return err; } diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index bfd8dac4c0cc..9702b70da444 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -21,6 +21,7 @@ #include "libbpf.h" #include "libbpf_internal.h" #include "bpf.h" +#include "str_error.h" struct ring { ring_buffer_sample_fn sample_cb; @@ -88,8 +89,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; - pr_warn("ringbuf: failed to get map info for fd=%d: %d\n", - map_fd, err); + pr_warn("ringbuf: failed to get map info for fd=%d: %s\n", + map_fd, errstr(err)); return libbpf_err(err); } @@ -123,8 +124,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; - pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n", - map_fd, err); + pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %s\n", + map_fd, errstr(err)); goto err_out; } r->consumer_pos = tmp; @@ -142,8 +143,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; - pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n", - map_fd, err); + pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %s\n", + map_fd, errstr(err)); goto err_out; } r->producer_pos = tmp; @@ -156,8 +157,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd, e->data.fd = rb->ring_cnt; if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) { err = -errno; - pr_warn("ringbuf: failed to epoll add map fd=%d: 
%d\n", - map_fd, err); + pr_warn("ringbuf: failed to epoll add map fd=%d: %s\n", + map_fd, errstr(err)); goto err_out; } @@ -205,7 +206,7 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx, rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (rb->epoll_fd < 0) { err = -errno; - pr_warn("ringbuf: failed to create epoll instance: %d\n", err); + pr_warn("ringbuf: failed to create epoll instance: %s\n", errstr(err)); goto err_out; } @@ -458,7 +459,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) err = bpf_map_get_info_by_fd(map_fd, &info, &len); if (err) { err = -errno; - pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err); + pr_warn("user ringbuf: failed to get map info for fd=%d: %s\n", + map_fd, errstr(err)); return err; } @@ -474,8 +476,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0); if (tmp == MAP_FAILED) { err = -errno; - pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n", - map_fd, err); + pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %s\n", + map_fd, errstr(err)); return err; } rb->consumer_pos = tmp; @@ -494,8 +496,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) map_fd, rb->page_size); if (tmp == MAP_FAILED) { err = -errno; - pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n", - map_fd, err); + pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %s\n", + map_fd, errstr(err)); return err; } @@ -506,7 +508,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd) rb_epoll->events = EPOLLOUT; if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) { err = -errno; - pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err); + pr_warn("user ringbuf: failed to epoll add map fd=%d: %s\n", map_fd, errstr(err)); return err; } @@ -531,7 +533,7 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts) rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (rb->epoll_fd < 0) { err = -errno; - pr_warn("user ringbuf: failed to create epoll instance: %d\n", err); + pr_warn("user ringbuf: failed to create epoll instance: %s\n", errstr(err)); goto err_out; } diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 0875452521e9..4d5fa079b5d6 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -351,10 +351,11 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) attr.test.ctx_size_in = opts->ctx->sz; err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz); if (err < 0 || (int)attr.test.retval < 0) { - opts->errstr = "failed to execute loader prog"; if (err < 0) { + opts->errstr = "failed to execute loader prog"; set_err; } else { + opts->errstr = "error returned by loader prog"; err = (int)attr.test.retval; #ifndef __KERNEL__ errno = -err; diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c index 5e6a1e27ddf9..8743049e32b7 100644 --- a/tools/lib/bpf/str_error.c +++ b/tools/lib/bpf/str_error.c @@ -5,6 +5,10 @@ #include #include "str_error.h" +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 @@ -31,3 +35,70 @@ char *libbpf_strerror_r(int err, char *dst, int len) } return dst; } + +const char *errstr(int err) +{ + static __thread char buf[12]; + + if (err > 0) + err = -err; + + switch (err) { 
+ case -E2BIG: return "-E2BIG"; + case -EACCES: return "-EACCES"; + case -EADDRINUSE: return "-EADDRINUSE"; + case -EADDRNOTAVAIL: return "-EADDRNOTAVAIL"; + case -EAGAIN: return "-EAGAIN"; + case -EALREADY: return "-EALREADY"; + case -EBADF: return "-EBADF"; + case -EBADFD: return "-EBADFD"; + case -EBUSY: return "-EBUSY"; + case -ECANCELED: return "-ECANCELED"; + case -ECHILD: return "-ECHILD"; + case -EDEADLK: return "-EDEADLK"; + case -EDOM: return "-EDOM"; + case -EEXIST: return "-EEXIST"; + case -EFAULT: return "-EFAULT"; + case -EFBIG: return "-EFBIG"; + case -EILSEQ: return "-EILSEQ"; + case -EINPROGRESS: return "-EINPROGRESS"; + case -EINTR: return "-EINTR"; + case -EINVAL: return "-EINVAL"; + case -EIO: return "-EIO"; + case -EISDIR: return "-EISDIR"; + case -ELOOP: return "-ELOOP"; + case -EMFILE: return "-EMFILE"; + case -EMLINK: return "-EMLINK"; + case -EMSGSIZE: return "-EMSGSIZE"; + case -ENAMETOOLONG: return "-ENAMETOOLONG"; + case -ENFILE: return "-ENFILE"; + case -ENODATA: return "-ENODATA"; + case -ENODEV: return "-ENODEV"; + case -ENOENT: return "-ENOENT"; + case -ENOEXEC: return "-ENOEXEC"; + case -ENOLINK: return "-ENOLINK"; + case -ENOMEM: return "-ENOMEM"; + case -ENOSPC: return "-ENOSPC"; + case -ENOTBLK: return "-ENOTBLK"; + case -ENOTDIR: return "-ENOTDIR"; + case -ENOTSUPP: return "-ENOTSUPP"; + case -ENOTTY: return "-ENOTTY"; + case -ENXIO: return "-ENXIO"; + case -EOPNOTSUPP: return "-EOPNOTSUPP"; + case -EOVERFLOW: return "-EOVERFLOW"; + case -EPERM: return "-EPERM"; + case -EPIPE: return "-EPIPE"; + case -EPROTO: return "-EPROTO"; + case -EPROTONOSUPPORT: return "-EPROTONOSUPPORT"; + case -ERANGE: return "-ERANGE"; + case -EROFS: return "-EROFS"; + case -ESPIPE: return "-ESPIPE"; + case -ESRCH: return "-ESRCH"; + case -ETXTBSY: return "-ETXTBSY"; + case -EUCLEAN: return "-EUCLEAN"; + case -EXDEV: return "-EXDEV"; + default: + snprintf(buf, sizeof(buf), "%d", err); + return buf; + } +} diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h index 626d7ffb03d6..66ffebde0684 100644 --- a/tools/lib/bpf/str_error.h +++ b/tools/lib/bpf/str_error.h @@ -6,4 +6,11 @@ char *libbpf_strerror_r(int err, char *dst, int len); +/** + * @brief **errstr()** returns string corresponding to numeric errno + * @param err negative numeric errno + * @return pointer to string representation of the errno, that is invalidated + * upon the next call. + */ +const char *errstr(int err); #endif /* __LIBBPF_STR_ERROR_H */ diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index 93794f01bb67..5f085736c6c4 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -20,6 +20,7 @@ #include "libbpf_common.h" #include "libbpf_internal.h" #include "hashmap.h" +#include "str_error.h" /* libbpf's USDT support consists of BPF-side state/code and user-space * state/code working together in concert. 
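The behavior of the new errstr() helper in a nutshell; it is libbpf-internal (declared in str_error.h), so the snippet below only builds inside the libbpf tree and is purely illustrative:

#include <errno.h>
#include <stdio.h>
#include "str_error.h"

int main(void)
{
	printf("%s\n", errstr(-ENOENT));	/* known code           -> "-ENOENT" */
	printf("%s\n", errstr(EINVAL));		/* positive input flips -> "-EINVAL" */
	printf("%s\n", errstr(-5000));		/* unknown code         -> "-5000" (thread-local buffer) */
	return 0;
}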
BPF-side parts are defined in @@ -465,8 +466,8 @@ static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, goto proceed; if (!realpath(lib_path, path)) { - pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n", - lib_path, -errno); + pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n", + lib_path, errstr(-errno)); libbpf_strlcpy(path, lib_path, sizeof(path)); } @@ -475,8 +476,8 @@ static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, f = fopen(line, "re"); if (!f) { err = -errno; - pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n", - line, lib_path, err); + pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n", + line, lib_path, errstr(err)); return err; } @@ -606,7 +607,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char * err = parse_elf_segs(elf, path, &segs, &seg_cnt); if (err) { - pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err); + pr_warn("usdt: failed to process ELF program segments for '%s': %s\n", + path, errstr(err)); goto err_out; } @@ -708,8 +710,8 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char * if (vma_seg_cnt == 0) { err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt); if (err) { - pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n", - pid, path, err); + pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n", + pid, path, errstr(err)); goto err_out; } } @@ -1047,8 +1049,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) { err = -errno; - pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n", - spec_id, usdt_provider, usdt_name, path, err); + pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n", + spec_id, usdt_provider, usdt_name, path, errstr(err)); goto err_out; } if (!man->has_bpf_cookie && @@ -1058,9 +1060,9 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n", spec_id, usdt_provider, usdt_name, path); } else { - pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n", + pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n", target->abs_ip, spec_id, usdt_provider, usdt_name, - path, err); + path, errstr(err)); } goto err_out; } @@ -1076,8 +1078,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct target->rel_ip, &opts); err = libbpf_get_error(uprobe_link); if (err) { - pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", - i, usdt_provider, usdt_name, path, err); + pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n", + i, usdt_provider, usdt_name, path, errstr(err)); goto err_out; } @@ -1099,8 +1101,8 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct NULL, &opts_multi); if (!link->multi_link) { err = -errno; - pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n", - usdt_provider, usdt_name, path, err); + pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n", + usdt_provider, usdt_name, path, errstr(err)); goto err_out; } diff --git a/tools/lib/bpf/zip.c b/tools/lib/bpf/zip.c index 3f26d629b2b4..88c376a8348d 100644 --- 
a/tools/lib/bpf/zip.c +++ b/tools/lib/bpf/zip.c @@ -223,7 +223,7 @@ struct zip_archive *zip_archive_open(const char *path) if (!archive) { munmap(data, size); return ERR_PTR(-ENOMEM); - }; + } archive->data = data; archive->size = size; diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index eb896d30545b..555d617c1f50 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -807,7 +807,7 @@ static int option__cmp(const void *va, const void *vb) static struct option *options__order(const struct option *opts) { int nr_opts = 0, nr_group = 0, nr_parent = 0, len; - const struct option *o, *p = opts; + const struct option *o = NULL, *p = opts; struct option *opt, *ordered = NULL, *group; /* flatten the options that have parents */ diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index e6533b3400de..d45c9a9b304d 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -16,7 +16,6 @@ fixdep /test_progs-cpuv4 test_verifier_log feature -test_sock urandom_read test_sockmap test_lirc_mode2_user diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 75016962f795..b1080284522d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -10,6 +10,7 @@ TOOLSDIR := $(abspath ../../..) LIBDIR := $(TOOLSDIR)/lib BPFDIR := $(LIBDIR)/bpf TOOLSINCDIR := $(TOOLSDIR)/include +TOOLSARCHINCDIR := $(TOOLSDIR)/arch/$(SRCARCH)/include BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool APIDIR := $(TOOLSINCDIR)/uapi ifneq ($(O),) @@ -44,7 +45,7 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic \ -Wall -Werror -fno-omit-frame-pointer \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ - -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) + -I$(TOOLSINCDIR) -I$(TOOLSARCHINCDIR) -I$(APIDIR) -I$(OUTPUT) LDFLAGS += $(SAN_LDFLAGS) LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread @@ -83,7 +84,7 @@ endif # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_sock test_sockmap \ + test_sockmap \ test_tcpnotify_user test_sysctl \ test_progs-no_alu32 TEST_INST_SUBDIRS := no_alu32 @@ -132,7 +133,6 @@ TEST_PROGS := test_kmod.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ test_lirc_mode2.sh \ - test_skb_cgroup_id.sh \ test_flow_dissector.sh \ test_xdp_vlan_mode_generic.sh \ test_xdp_vlan_mode_native.sh \ @@ -274,6 +274,7 @@ $(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c liburandom $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) \ $(filter %.c,$^) $(filter-out -static,$(LDLIBS)) \ + -Wno-unused-command-line-argument \ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \ -Wl,--version-script=liburandom_read.map \ -fPIC -shared -o $@ @@ -282,6 +283,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r $(call msg,BINARY,,$@) $(Q)$(CLANG) $(CLANG_TARGET_ARCH) \ $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \ + -Wno-unused-command-line-argument \ -lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \ -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \ -Wl,-rpath=. 
-o $@ @@ -295,25 +297,33 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) $(call msg,MOD,,$@) $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod + $(Q)$(MAKE) $(submake_extras) -C bpf_testmod \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ + EXTRA_CFLAGS='' EXTRA_LDFLAGS='' $(Q)cp bpf_testmod/bpf_testmod.ko $@ $(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch]) $(call msg,MOD,,$@) $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi + $(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ + EXTRA_CFLAGS='' EXTRA_LDFLAGS='' $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@ $(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch]) $(call msg,MOD,,$@) $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_x + $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ + EXTRA_CFLAGS='' EXTRA_LDFLAGS='' $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@ $(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch]) $(call msg,MOD,,$@) $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_y + $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ + EXTRA_CFLAGS='' EXTRA_LDFLAGS='' $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@ @@ -333,8 +343,8 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf/ \ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \ - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \ - EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \ + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' && \ cp $(RUNQSLOWER_OUTPUT)runqslower $@ TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL) @@ -349,7 +359,6 @@ JSON_WRITER := $(OUTPUT)/json_writer.o CAP_HELPERS := $(OUTPUT)/cap_helpers.o NETWORK_HELPERS := $(OUTPUT)/network_helpers.o -$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS) $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS) @@ -368,7 +377,8 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \ - EXTRA_CFLAGS='-g $(OPT_FLAGS)' \ + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ + EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \ @@ -379,7 +389,8 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ $(BPFOBJ) | $(BUILD_DIR)/bpftool $(Q)$(MAKE) $(submake_extras) -C 
$(BPFTOOLDIR) \ ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \ - EXTRA_CFLAGS='-g $(OPT_FLAGS)' \ + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ + EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ OUTPUT=$(BUILD_DIR)/bpftool/ \ LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \ LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \ @@ -402,8 +413,8 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(APIDIR)/linux/bpf.h \ | $(BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ - EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \ - EXTRA_LDFLAGS='$(SAN_LDFLAGS)' \ + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \ + EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers ifneq ($(BPFOBJ),$(HOST_BPFOBJ)) @@ -411,7 +422,9 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(APIDIR)/linux/bpf.h \ | $(HOST_BUILD_DIR)/libbpf $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \ - EXTRA_CFLAGS='-g $(OPT_FLAGS)' ARCH= CROSS_COMPILE= \ + ARCH= CROSS_COMPILE= \ + EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \ + EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \ OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ CC="$(HOSTCC)" LD="$(HOSTLD)" \ DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers @@ -460,6 +473,7 @@ endef IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - #include #include +#include "bpf_util.h" #include "bench.h" #include "trigger_bench.skel.h" #include "trace_helpers.h" @@ -72,7 +73,7 @@ static __always_inline void inc_counter(struct counter *counters) unsigned slot; if (unlikely(tid == 0)) - tid = syscall(SYS_gettid); + tid = sys_gettid(); /* multiplicative hashing, it's fast */ slot = 2654435769U * tid; diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index b0668f29f7b3..cd8ecd39c3f3 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, unsigned int flags__k, void *aux__ign) __ksym; #define bpf_wq_set_callback(timer, cb, flags) \ bpf_wq_set_callback_impl(timer, cb, flags, NULL) + +struct bpf_iter_kmem_cache; +extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym; +extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym; +extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym; + #endif diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 6c3b4d4f173a..aeef86b3da74 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare, TP_ARGS(ctx__nullable) ); +struct sk_buff; + +DECLARE_TRACE(bpf_testmod_test_raw_tp_null, + TP_PROTO(struct sk_buff *skb), + TP_ARGS(skb) +); + + #undef BPF_TESTMOD_DECLARE_TRACE #ifdef DECLARE_TRACE_WRITABLE #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 12005e3dc3e4..cc9dde507aba 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -245,6 +245,39 @@ __bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) call_rcu(&ctx->rcu, testmod_free_cb); } +static struct bpf_testmod_ops3 
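A hedged sketch of a program that could attach to the bpf_testmod_test_raw_tp_null tracepoint declared above; the module fires it with a NULL skb, so the argument has to be checked before use. The tp_btf section naming follows the existing bpf_testmod tracepoints and is an assumption here:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

int non_null_hits;

SEC("tp_btf/bpf_testmod_test_raw_tp_null")
int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
{
	if (!skb)	/* NULL is expected here; bail out without dereferencing */
		return 0;

	non_null_hits++;
	return 0;
}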
*st_ops3; + +static int bpf_testmod_test_3(void) +{ + return 0; +} + +static int bpf_testmod_test_4(void) +{ + return 0; +} + +static struct bpf_testmod_ops3 __bpf_testmod_ops3 = { + .test_1 = bpf_testmod_test_3, + .test_2 = bpf_testmod_test_4, +}; + +static void bpf_testmod_test_struct_ops3(void) +{ + if (st_ops3) + st_ops3->test_1(); +} + +__bpf_kfunc void bpf_testmod_ops3_call_test_1(void) +{ + st_ops3->test_1(); +} + +__bpf_kfunc void bpf_testmod_ops3_call_test_2(void) +{ + st_ops3->test_2(); +} + struct bpf_testmod_btf_type_tag_1 { int a; }; @@ -380,6 +413,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); + (void)trace_bpf_testmod_test_raw_tp_null(NULL); + + bpf_testmod_test_struct_ops3(); + struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + sizeof(int)), GFP_KERNEL); if (struct_arg3 != NULL) { @@ -584,6 +621,8 @@ BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU) BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1) +BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2) BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) BTF_ID_LIST(bpf_testmod_dtor_ids) @@ -1094,6 +1133,10 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { .is_valid_access = bpf_testmod_ops_is_valid_access, }; +static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = { + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + static int bpf_dummy_reg(void *kdata, struct bpf_link *link) { struct bpf_testmod_ops *ops = kdata; @@ -1173,6 +1216,68 @@ struct bpf_struct_ops bpf_testmod_ops2 = { .owner = THIS_MODULE, }; +static int st_ops3_reg(void *kdata, struct bpf_link *link) +{ + int err = 0; + + mutex_lock(&st_ops_mutex); + if (st_ops3) { + pr_err("st_ops has already been registered\n"); + err = -EEXIST; + goto unlock; + } + st_ops3 = kdata; + +unlock: + mutex_unlock(&st_ops_mutex); + return err; +} + +static void st_ops3_unreg(void *kdata, struct bpf_link *link) +{ + mutex_lock(&st_ops_mutex); + st_ops3 = NULL; + mutex_unlock(&st_ops_mutex); +} + +static void test_1_recursion_detected(struct bpf_prog *prog) +{ + struct bpf_prog_stats *stats; + + stats = this_cpu_ptr(prog->stats); + printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu", + u64_stats_read(&stats->misses)); +} + +static int st_ops3_check_member(const struct btf_type *t, + const struct btf_member *member, + const struct bpf_prog *prog) +{ + u32 moff = __btf_member_bit_offset(t, member) / 8; + + switch (moff) { + case offsetof(struct bpf_testmod_ops3, test_1): + prog->aux->priv_stack_requested = true; + prog->aux->recursion_detected = test_1_recursion_detected; + fallthrough; + default: + break; + } + return 0; +} + +struct bpf_struct_ops bpf_testmod_ops3 = { + .verifier_ops = &bpf_testmod_verifier_ops3, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = st_ops3_reg, + .unreg = st_ops3_unreg, + .check_member = st_ops3_check_member, + .cfi_stubs = &__bpf_testmod_ops3, + .name = "bpf_testmod_ops3", + .owner = THIS_MODULE, +}; + static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args) { return 0; @@ -1331,6 +1436,7 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, 
bpf_testmod_ops); ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); + ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3); ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, ARRAY_SIZE(bpf_testmod_dtors), diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index fb7dff47597a..356803d1c10e 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -94,6 +94,11 @@ struct bpf_testmod_ops2 { int (*test_1)(void); }; +struct bpf_testmod_ops3 { + int (*test_1)(void); + int (*test_2)(void); +}; + struct st_ops_args { u64 a; }; diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 10587a29b967..5f6963a320d7 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -6,6 +6,7 @@ #include #include #include +#include #include /* libbpf_num_possible_cpus */ static inline unsigned int bpf_num_possible_cpus(void) @@ -59,4 +60,15 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz) (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) #endif +/* Availability of gettid across glibc versions is hit-and-miss, therefore + * fallback to syscall in this macro and use it everywhere. + */ +#ifndef sys_gettid +#define sys_gettid() syscall(SYS_gettid) +#endif + +#ifndef ENOTSUPP +#define ENOTSUPP 524 +#endif + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/config.vm b/tools/testing/selftests/bpf/config.vm index a9746ca78777..da543b24c144 100644 --- a/tools/testing/selftests/bpf/config.vm +++ b/tools/testing/selftests/bpf/config.vm @@ -1,12 +1,15 @@ -CONFIG_9P_FS=y CONFIG_9P_FS_POSIX_ACL=y CONFIG_9P_FS_SECURITY=y +CONFIG_9P_FS=y CONFIG_CRYPTO_DEV_VIRTIO=y -CONFIG_NET_9P=y +CONFIG_FUSE_FS=y +CONFIG_FUSE_PASSTHROUGH=y CONFIG_NET_9P_VIRTIO=y +CONFIG_NET_9P=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_FS=y CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_VSOCKETS_COMMON=y diff --git a/tools/testing/selftests/bpf/io_helpers.c b/tools/testing/selftests/bpf/io_helpers.c new file mode 100644 index 000000000000..4ada0a74aa1f --- /dev/null +++ b/tools/testing/selftests/bpf/io_helpers.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +int read_with_timeout(int fd, char *buf, size_t count, long usec) +{ + const long M = 1000 * 1000; + struct timeval tv = { usec / M, usec % M }; + fd_set fds; + int err; + + FD_ZERO(&fds); + FD_SET(fd, &fds); + err = select(fd + 1, &fds, NULL, NULL, &tv); + if (err < 0) + return err; + if (FD_ISSET(fd, &fds)) + return read(fd, buf, count); + return -EAGAIN; +} diff --git a/tools/testing/selftests/bpf/io_helpers.h b/tools/testing/selftests/bpf/io_helpers.h new file mode 100644 index 000000000000..21e1134cd3ce --- /dev/null +++ b/tools/testing/selftests/bpf/io_helpers.h @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* As a regular read(2), but allows to specify a timeout in micro-seconds. + * Returns -EAGAIN on timeout. 
+ */ +int read_with_timeout(int fd, char *buf, size_t count, long usec); diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c index 7d050364efca..62971dbf2996 100644 --- a/tools/testing/selftests/bpf/map_tests/task_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c @@ -12,6 +12,7 @@ #include #include +#include "bpf_util.h" #include "test_maps.h" #include "task_local_storage_helpers.h" #include "read_bpf_task_storage_busy.skel.h" @@ -115,7 +116,7 @@ void test_task_storage_map_stress_lookup(void) CHECK(err, "attach", "error %d\n", err); /* Trigger program */ - syscall(SYS_gettid); + sys_gettid(); skel->bss->pid = 0; CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 070c52c312e5..6befa870434b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -690,7 +690,7 @@ void test_bpf_cookie(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->my_tid = syscall(SYS_gettid); + skel->bss->my_tid = sys_gettid(); if (test__start_subtest("kprobe")) kprobe_subtest(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index f0a3a9c18e9e..6f1bfacd7375 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts, ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL), "pthread_create"); - skel->bss->tid = gettid(); + skel->bss->tid = sys_gettid(); do_dummy_read_opts(skel->progs.dump_task, opts); @@ -255,20 +255,20 @@ static void *run_test_task_tid(void *arg) union bpf_iter_link_info linfo; int num_unknown_tid, num_known_tid; - ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id"); + ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id"); memset(&linfo, 0, sizeof(linfo)); - linfo.task.tid = gettid(); + linfo.task.tid = sys_gettid(); opts.link_info = &linfo; opts.link_info_len = sizeof(linfo); test_task_common(&opts, 0, 1); linfo.task.tid = 0; linfo.task.pid = getpid(); - /* This includes the parent thread, this thread, + /* This includes the parent thread, this thread, watchdog timer thread * and the do_nothing_wait thread */ - test_task_common(&opts, 2, 1); + test_task_common(&opts, 3, 1); test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid); ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid"); @@ -297,7 +297,7 @@ static void test_task_pid(void) opts.link_info = &linfo; opts.link_info_len = sizeof(linfo); - test_task_common(&opts, 1, 1); + test_task_common(&opts, 2, 1); } static void test_task_pidfd(void) @@ -315,7 +315,7 @@ static void test_task_pidfd(void) opts.link_info = &linfo; opts.link_info_len = sizeof(linfo); - test_task_common(&opts, 1, 1); + test_task_common(&opts, 2, 1); close(pidfd); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 409a06975823..b7d1b52309d0 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -16,10 +16,6 @@ #include "tcp_ca_kfunc.skel.h" #include "bpf_cc_cubic.skel.h" -#ifndef ENOTSUPP -#define ENOTSUPP 524 -#endif - static const unsigned int 
total_bytes = 10 * 1024 * 1024; static int expected_stg = 0xeB9F; diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c index 3bff680de16c..c40df623a8f7 100644 --- a/tools/testing/selftests/bpf/prog_tests/cb_refs.c +++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c @@ -11,8 +11,8 @@ struct { const char *prog_name; const char *err_msg; } cb_refs_tests[] = { - { "underflow_prog", "reference has not been acquired before" }, - { "leak_prog", "Unreleased reference" }, + { "underflow_prog", "must point to scalar, or struct with scalar" }, + { "leak_prog", "Possibly NULL pointer passed to helper arg2" }, { "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */ { "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */ }; diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c index 747761572098..9015e2c2ab12 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c @@ -63,14 +63,14 @@ static void test_tp_btf(int cgroup_fd) if (!ASSERT_OK(err, "map_delete_elem")) goto out; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); err = cgrp_ls_tp_btf__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; - syscall(SYS_gettid); - syscall(SYS_gettid); + sys_gettid(); + sys_gettid(); skel->bss->target_pid = 0; @@ -154,7 +154,7 @@ static void test_recursion(int cgroup_fd) goto out; /* trigger sys_enter, make sure it does not cause deadlock */ - syscall(SYS_gettid); + sys_gettid(); out: cgrp_ls_recursion__destroy(skel); @@ -224,7 +224,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id) return; CGROUP_MODE_SET(skel); - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.yes_rcu_lock, true); err = cgrp_ls_sleepable__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 26019313e1fc..1c682550e0e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1010,7 +1010,7 @@ static void run_core_reloc_tests(bool use_btfgen) struct data *data; void *mmap_data = NULL; - my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32); + my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32); for (i = 0; i < ARRAY_SIZE(test_cases); i++) { char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index 89ff23c4a8bc..3cea71f9c500 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -192,8 +192,8 @@ static void subtest_task_iters(void) syscall(SYS_getpgid); iters_task__detach(skel); ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt"); - ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt"); - ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt"); + ASSERT_EQ(skel->bss->threads_cnt, thread_num + 2, "threads_cnt"); + ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 2, "proc_threads_cnt"); ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt"); pthread_mutex_unlock(&do_nothing_mutex); for (int i = 0; i < thread_num; i++) diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c 
b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c new file mode 100644 index 000000000000..8e13a3416a21 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google */ + +#include +#include +#include +#include "kmem_cache_iter.skel.h" + +#define SLAB_NAME_MAX 32 + +struct kmem_cache_result { + char name[SLAB_NAME_MAX]; + long obj_size; +}; + +static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .flags = 0, /* Run it with the current task */ + ); + int prog_fd = bpf_program__fd(skel->progs.check_task_struct); + + /* Get task_struct and check if it's from a slab cache */ + ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run"); + + /* The BPF program should set the 'found' variable */ + ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found"); +} + +static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel) +{ + FILE *fp; + int map_fd; + char name[SLAB_NAME_MAX]; + unsigned long objsize; + char rest_of_line[1000]; + struct kmem_cache_result r; + int seen = 0; + + fp = fopen("/proc/slabinfo", "r"); + if (fp == NULL) { + /* CONFIG_SLUB_DEBUG is not enabled */ + return; + } + + map_fd = bpf_map__fd(skel->maps.slab_result); + + /* Ignore the first two header lines */ + fscanf(fp, "slabinfo - version: %*d.%*d\n"); + fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line); + + /* Compare name and objsize only - others can change frequently */ + while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n", + name, &objsize, rest_of_line) == 3) { + int ret = bpf_map_lookup_elem(map_fd, &seen, &r); + + if (!ASSERT_OK(ret, "kmem_cache_lookup")) + break; + + ASSERT_STREQ(r.name, name, "kmem_cache_name"); + ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize"); + + seen++; + } + + ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq"); + + fclose(fp); +} + +static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, fd; + + /* No need to attach it, just run it directly */ + fd = bpf_program__fd(skel->progs.open_coded_iter); + + err = bpf_prog_test_run_opts(fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + /* It should be the same as we've seen from the explicit iterator */ + ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq"); +} + +void test_kmem_cache_iter(void) +{ + struct kmem_cache_iter *skel = NULL; + char buf[256]; + int iter_fd; + + skel = kmem_cache_iter__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load")) + return; + + if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach")) + goto destroy; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector)); + if (!ASSERT_GE(iter_fd, 0, "iter_create")) + goto destroy; + + memset(buf, 0, sizeof(buf)); + while (read(iter_fd, buf, sizeof(buf)) > 0) { + /* Read out all contents */ + printf("%s", buf); + } + + /* Next reads should return 0 */ + ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read"); + + if (test__start_subtest("check_task_struct")) + subtest_kmem_cache_iter_check_task_struct(skel); + if (test__start_subtest("check_slabinfo")) + subtest_kmem_cache_iter_check_slabinfo(skel); + if (test__start_subtest("open_coded_iter")) + subtest_kmem_cache_iter_open_coded(skel); + 
close(iter_fd); + +destroy: + kmem_cache_iter__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 960c9323d1e0..66ab1cae923e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -6,6 +6,7 @@ #include "kprobe_multi_override.skel.h" #include "kprobe_multi_session.skel.h" #include "kprobe_multi_session_cookie.skel.h" +#include "kprobe_multi_verifier.skel.h" #include "bpf/libbpf_internal.h" #include "bpf/hashmap.h" @@ -764,4 +765,5 @@ void test_kprobe_multi_test(void) test_session_skel_api(); if (test__start_subtest("session_cookie")) test_session_cookie_skel_api(); + RUN_TESTS(kprobe_multi_verifier); } diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c index cad664546912..fa639b021f7e 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c @@ -20,7 +20,7 @@ void test_linked_funcs(void) bpf_program__set_autoload(skel->progs.handler1, true); bpf_program__set_autoload(skel->progs.handler2, true); - skel->rodata->my_tid = syscall(SYS_gettid); + skel->rodata->my_tid = sys_gettid(); skel->bss->syscall_id = SYS_getpgid; err = linked_funcs__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index 27676a04d0b6..169ce689b97c 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -169,7 +169,6 @@ static void bpf_prog_load_log_buf(void) ASSERT_GE(fd, 0, "good_fd1"); if (fd >= 0) close(fd); - fd = -1; /* log_level == 2 should always fill log_buf, even for good prog */ log_buf[0] = '\0'; @@ -180,7 +179,6 @@ static void bpf_prog_load_log_buf(void) ASSERT_GE(fd, 0, "good_fd2"); if (fd >= 0) close(fd); - fd = -1; /* log_level == 0 should fill log_buf for bad prog */ log_buf[0] = '\0'; @@ -191,7 +189,6 @@ static void bpf_prog_load_log_buf(void) ASSERT_LT(fd, 0, "bad_fd"); if (fd >= 0) close(fd); - fd = -1; free(log_buf); } diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c index 130a3b21e467..6df25de8f080 100644 --- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c @@ -10,10 +10,6 @@ #include "cgroup_helpers.h" #include "network_helpers.h" -#ifndef ENOTSUPP -#define ENOTSUPP 524 -#endif - static struct btf *btf; static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func) diff --git a/tools/testing/selftests/bpf/prog_tests/map_in_map.c b/tools/testing/selftests/bpf/prog_tests/map_in_map.c index d2a10eb4e5b5..286a9fb469e2 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_in_map.c +++ b/tools/testing/selftests/bpf/prog_tests/map_in_map.c @@ -5,7 +5,9 @@ #include #include #include + #include "access_map_in_map.skel.h" +#include "update_map_in_htab.skel.h" struct thread_ctx { pthread_barrier_t barrier; @@ -127,6 +129,131 @@ static void test_map_in_map_access(const char *prog_name, const char *map_name) access_map_in_map__destroy(skel); } +static void add_del_fd_htab(int outer_fd) +{ + int inner_fd, err; + int key = 1; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner1")) + return; + err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST); 
+ close(inner_fd); + if (!ASSERT_OK(err, "add")) + return; + + /* Delete */ + err = bpf_map_delete_elem(outer_fd, &key); + ASSERT_OK(err, "del"); +} + +static void overwrite_fd_htab(int outer_fd) +{ + int inner_fd, err; + int key = 1; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner1")) + return; + err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST); + close(inner_fd); + if (!ASSERT_OK(err, "add")) + return; + + /* Overwrite */ + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner2")) + goto out; + err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_EXIST); + close(inner_fd); + if (!ASSERT_OK(err, "overwrite")) + goto out; + + err = bpf_map_delete_elem(outer_fd, &key); + ASSERT_OK(err, "del"); + return; +out: + bpf_map_delete_elem(outer_fd, &key); +} + +static void lookup_delete_fd_htab(int outer_fd) +{ + int key = 1, value; + int inner_fd, err; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner1")) + return; + err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST); + close(inner_fd); + if (!ASSERT_OK(err, "add")) + return; + + /* lookup_and_delete is not supported for htab of maps */ + err = bpf_map_lookup_and_delete_elem(outer_fd, &key, &value); + ASSERT_EQ(err, -ENOTSUPP, "lookup_del"); + + err = bpf_map_delete_elem(outer_fd, &key); + ASSERT_OK(err, "del"); +} + +static void batched_lookup_delete_fd_htab(int outer_fd) +{ + int keys[2] = {1, 2}, values[2]; + unsigned int cnt, batch; + int inner_fd, err; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner1")) + return; + + err = bpf_map_update_elem(outer_fd, &keys[0], &inner_fd, BPF_NOEXIST); + close(inner_fd); + if (!ASSERT_OK(err, "add1")) + return; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "inner2")) + goto out; + err = bpf_map_update_elem(outer_fd, &keys[1], &inner_fd, BPF_NOEXIST); + close(inner_fd); + if (!ASSERT_OK(err, "add2")) + goto out; + + /* batched lookup_and_delete */ + cnt = ARRAY_SIZE(keys); + err = bpf_map_lookup_and_delete_batch(outer_fd, NULL, &batch, keys, values, &cnt, NULL); + ASSERT_TRUE((!err || err == -ENOENT), "delete_batch ret"); + ASSERT_EQ(cnt, ARRAY_SIZE(keys), "delete_batch cnt"); + +out: + bpf_map_delete_elem(outer_fd, &keys[0]); +} + +static void test_update_map_in_htab(bool preallocate) +{ + struct update_map_in_htab *skel; + int err, fd; + + skel = update_map_in_htab__open(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + err = update_map_in_htab__load(skel); + if (!ASSERT_OK(err, "load")) + goto out; + + fd = preallocate ? 
bpf_map__fd(skel->maps.outer_htab_map) : + bpf_map__fd(skel->maps.outer_alloc_htab_map); + + add_del_fd_htab(fd); + overwrite_fd_htab(fd); + lookup_delete_fd_htab(fd); + batched_lookup_delete_fd_htab(fd); +out: + update_map_in_htab__destroy(skel); +} + void test_map_in_map(void) { if (test__start_subtest("acc_map_in_array")) @@ -137,5 +264,8 @@ void test_map_in_map(void) test_map_in_map_access("access_map_in_htab", "outer_htab_map"); if (test__start_subtest("sleepable_acc_map_in_htab")) test_map_in_map_access("sleepable_access_map_in_htab", "outer_htab_map"); + if (test__start_subtest("update_map_in_htab")) + test_update_map_in_htab(true); + if (test__start_subtest("update_map_in_alloc_htab")) + test_update_map_in_htab(false); } - diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index c29787e092d6..761ce24bce38 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -23,7 +23,7 @@ static int get_pid_tgid(pid_t *pid, pid_t *tgid, struct stat st; int err; - *pid = syscall(SYS_gettid); + *pid = sys_gettid(); *tgid = getpid(); err = stat("/proc/self/ns/pid", &st); diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index 3a25f1c743a1..d940ff87fa08 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -4,8 +4,12 @@ #include #include #include +#include "testing_helpers.h" #include "test_perf_link.skel.h" +#define BURN_TIMEOUT_MS 100 +#define BURN_TIMEOUT_NS BURN_TIMEOUT_MS * 1000000 + static void burn_cpu(void) { volatile int j = 0; @@ -32,6 +36,7 @@ void serial_test_perf_link(void) int run_cnt_before, run_cnt_after; struct bpf_link_info info; __u32 info_len = sizeof(info); + __u64 timeout_time_ns; /* create perf event */ memset(&attr, 0, sizeof(attr)); @@ -63,8 +68,14 @@ void serial_test_perf_link(void) ASSERT_GT(info.prog_id, 0, "link_prog_id"); /* ensure we get at least one perf_event prog execution */ - burn_cpu(); - ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt"); + timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS; + while (true) { + burn_cpu(); + if (skel->bss->run_cnt > 0) + break; + if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout")) + break; + } /* perf_event is still active, but we close link and BPF program * shouldn't be executed anymore diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c new file mode 100644 index 000000000000..6fa19449297e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include "raw_tp_null.skel.h" + +void test_raw_tp_null(void) +{ + struct raw_tp_null *skel; + + skel = raw_tp_null__open_and_load(); + if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load")) + return; + + skel->bss->tid = sys_gettid(); + + if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach")) + goto end; + + ASSERT_OK(trigger_module_test_read(2), "trigger testmod read"); + ASSERT_EQ(skel->bss->i, 3, "invocations"); + +end: + raw_tp_null__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index a1f7e7378a64..ebe0c12b5536 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -21,7 +21,7 @@ static void test_success(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.get_cgroup_id, true); bpf_program__set_autoload(skel->progs.task_succ, true); @@ -58,7 +58,7 @@ static void test_rcuptr_acquire(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); bpf_program__set_autoload(skel->progs.task_acquire, true); err = rcu_read_lock__load(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index 6cc69900b310..1702aa592c2c 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -3,22 +3,32 @@ #include #include #include "test_send_signal_kern.skel.h" +#include "io_helpers.h" static int sigusr1_received; static void sigusr1_handler(int signum) { - sigusr1_received = 1; + sigusr1_received = 8; +} + +static void sigusr1_siginfo_handler(int s, siginfo_t *i, void *v) +{ + sigusr1_received = (int)(long long)i->si_value.sival_ptr; } static void test_send_signal_common(struct perf_event_attr *attr, - bool signal_thread) + bool signal_thread, bool remote) { struct test_send_signal_kern *skel; + struct sigaction sa; int pipe_c2p[2], pipe_p2c[2]; int err = -1, pmu_fd = -1; + volatile int j = 0; + int retry_count; char buf[256]; pid_t pid; + int old_prio; if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p")) return; @@ -39,11 +49,14 @@ static void test_send_signal_common(struct perf_event_attr *attr, } if (pid == 0) { - int old_prio; - volatile int j = 0; - /* install signal handler and notify parent */ - ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal"); + if (remote) { + sa.sa_sigaction = sigusr1_siginfo_handler; + sa.sa_flags = SA_RESTART | SA_SIGINFO; + ASSERT_NEQ(sigaction(SIGUSR1, &sa, NULL), -1, "sigaction"); + } else { + ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal"); + } close(pipe_c2p[0]); /* close read */ close(pipe_p2c[1]); /* close write */ @@ -52,10 +65,12 @@ static void test_send_signal_common(struct perf_event_attr *attr, * that if an interrupt happens, the underlying task * is this process. 
*/ - errno = 0; - old_prio = getpriority(PRIO_PROCESS, 0); - ASSERT_OK(errno, "getpriority"); - ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority"); + if (!remote) { + errno = 0; + old_prio = getpriority(PRIO_PROCESS, 0); + ASSERT_OK(errno, "getpriority"); + ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority"); + } /* notify parent signal handler is installed */ ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write"); @@ -66,20 +81,25 @@ static void test_send_signal_common(struct perf_event_attr *attr, /* wait a little for signal handler */ for (int i = 0; i < 1000000000 && !sigusr1_received; i++) { j /= i + j + 1; - if (!attr) - /* trigger the nanosleep tracepoint program. */ - usleep(1); + if (remote) + sleep(1); + else + if (!attr) + /* trigger the nanosleep tracepoint program. */ + usleep(1); } - buf[0] = sigusr1_received ? '2' : '0'; - ASSERT_EQ(sigusr1_received, 1, "sigusr1_received"); + buf[0] = sigusr1_received; + + ASSERT_EQ(sigusr1_received, 8, "sigusr1_received"); ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write"); /* wait for parent notification and exit */ ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); /* restore the old priority */ - ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority"); + if (!remote) + ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority"); close(pipe_c2p[1]); close(pipe_p2c[0]); @@ -93,6 +113,17 @@ static void test_send_signal_common(struct perf_event_attr *attr, if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) goto skel_open_load_failure; + /* boost with a high priority so we got a higher chance + * that if an interrupt happens, the underlying task + * is this process. + */ + if (remote) { + errno = 0; + old_prio = getpriority(PRIO_PROCESS, 0); + ASSERT_OK(errno, "getpriority"); + ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority"); + } + if (!attr) { err = test_send_signal_kern__attach(skel); if (!ASSERT_OK(err, "skel_attach")) { @@ -100,8 +131,12 @@ static void test_send_signal_common(struct perf_event_attr *attr, goto destroy_skel; } } else { - pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */, - -1 /* group id */, 0 /* flags */); + if (!remote) + pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */, + -1 /* group id */, 0 /* flags */); + else + pmu_fd = syscall(__NR_perf_event_open, attr, getpid(), -1 /* cpu */, + -1 /* group id */, 0 /* flags */); if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) { err = -1; goto destroy_skel; @@ -119,13 +154,36 @@ static void test_send_signal_common(struct perf_event_attr *attr, /* trigger the bpf send_signal */ skel->bss->signal_thread = signal_thread; skel->bss->sig = SIGUSR1; - skel->bss->pid = pid; + if (!remote) { + skel->bss->target_pid = 0; + skel->bss->pid = pid; + } else { + skel->bss->target_pid = pid; + skel->bss->pid = getpid(); + } /* notify child that bpf program can send_signal now */ ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); - /* wait for result */ - err = read(pipe_c2p[0], buf, 1); + for (retry_count = 0;;) { + /* For the remote test, the BPF program is triggered from this + * process but the other process/thread is signaled. 
+ */ + if (remote) { + if (!attr) { + for (int i = 0; i < 10; i++) + usleep(1); + } else { + for (int i = 0; i < 100000000; i++) + j /= i + 1; + } + } + /* wait for result */ + err = read_with_timeout(pipe_c2p[0], buf, 1, 100); + if (err == -EAGAIN && retry_count++ < 10000) + continue; + break; + } if (!ASSERT_GE(err, 0, "reading pipe")) goto disable_pmu; if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) { @@ -133,7 +191,7 @@ static void test_send_signal_common(struct perf_event_attr *attr, goto disable_pmu; } - ASSERT_EQ(buf[0], '2', "incorrect result"); + ASSERT_EQ(buf[0], 8, "incorrect result"); /* notify child safe to exit */ ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write"); @@ -142,18 +200,21 @@ static void test_send_signal_common(struct perf_event_attr *attr, close(pmu_fd); destroy_skel: test_send_signal_kern__destroy(skel); + /* restore the old priority */ + if (remote) + ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority"); skel_open_load_failure: close(pipe_c2p[0]); close(pipe_p2c[1]); wait(NULL); } -static void test_send_signal_tracepoint(bool signal_thread) +static void test_send_signal_tracepoint(bool signal_thread, bool remote) { - test_send_signal_common(NULL, signal_thread); + test_send_signal_common(NULL, signal_thread, remote); } -static void test_send_signal_perf(bool signal_thread) +static void test_send_signal_perf(bool signal_thread, bool remote) { struct perf_event_attr attr = { .freq = 1, @@ -162,13 +223,14 @@ static void test_send_signal_perf(bool signal_thread) .config = PERF_COUNT_SW_CPU_CLOCK, }; - test_send_signal_common(&attr, signal_thread); + test_send_signal_common(&attr, signal_thread, remote); } -static void test_send_signal_nmi(bool signal_thread) +static void test_send_signal_nmi(bool signal_thread, bool remote) { struct perf_event_attr attr = { - .sample_period = 1, + .freq = 1, + .sample_freq = 1000, .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, }; @@ -191,21 +253,35 @@ static void test_send_signal_nmi(bool signal_thread) close(pmu_fd); } - test_send_signal_common(&attr, signal_thread); + test_send_signal_common(&attr, signal_thread, remote); } void test_send_signal(void) { if (test__start_subtest("send_signal_tracepoint")) - test_send_signal_tracepoint(false); + test_send_signal_tracepoint(false, false); if (test__start_subtest("send_signal_perf")) - test_send_signal_perf(false); + test_send_signal_perf(false, false); if (test__start_subtest("send_signal_nmi")) - test_send_signal_nmi(false); + test_send_signal_nmi(false, false); if (test__start_subtest("send_signal_tracepoint_thread")) - test_send_signal_tracepoint(true); + test_send_signal_tracepoint(true, false); if (test__start_subtest("send_signal_perf_thread")) - test_send_signal_perf(true); + test_send_signal_perf(true, false); if (test__start_subtest("send_signal_nmi_thread")) - test_send_signal_nmi(true); + test_send_signal_nmi(true, false); + + /* Signal remote thread and thread group */ + if (test__start_subtest("send_signal_tracepoint_remote")) + test_send_signal_tracepoint(false, true); + if (test__start_subtest("send_signal_perf_remote")) + test_send_signal_perf(false, true); + if (test__start_subtest("send_signal_nmi_remote")) + test_send_signal_nmi(false, true); + if (test__start_subtest("send_signal_tracepoint_thread_remote")) + test_send_signal_tracepoint(true, true); + if (test__start_subtest("send_signal_perf_thread_remote")) + test_send_signal_perf(true, true); + if (test__start_subtest("send_signal_nmi_thread_remote")) + 
test_send_signal_nmi(true, true); } diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c index a6ee7f8d4f79..b2efabbed220 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_addr.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c @@ -23,10 +23,6 @@ #include "getpeername_unix_prog.skel.h" #include "network_helpers.h" -#ifndef ENOTSUPP -# define ENOTSUPP 524 -#endif - #define TEST_NS "sock_addr" #define TEST_IF_PREFIX "test_sock_addr" #define TEST_IPV4 "127.0.0.4" diff --git a/tools/testing/selftests/bpf/prog_tests/sock_create.c b/tools/testing/selftests/bpf/prog_tests/sock_create.c new file mode 100644 index 000000000000..187ffc5e60c4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sock_create.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include "cgroup_helpers.h" + +static char bpf_log_buf[4096]; +static bool verbose; + +enum sock_create_test_error { + OK = 0, + DENY_CREATE, +}; + +static struct sock_create_test { + const char *descr; + const struct bpf_insn insns[64]; + enum bpf_attach_type attach_type; + enum bpf_attach_type expected_attach_type; + + int domain; + int type; + int protocol; + + int optname; + int optval; + enum sock_create_test_error error; +} tests[] = { + { + .descr = "AF_INET set priority", + .insns = { + /* r3 = 123 (priority) */ + BPF_MOV64_IMM(BPF_REG_3, 123), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, priority)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_DGRAM, + + .optname = SO_PRIORITY, + .optval = 123, + }, + { + .descr = "AF_INET6 set priority", + .insns = { + /* r3 = 123 (priority) */ + BPF_MOV64_IMM(BPF_REG_3, 123), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, priority)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET6, + .type = SOCK_DGRAM, + + .optname = SO_PRIORITY, + .optval = 123, + }, + { + .descr = "AF_INET set mark", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* get uid of process */ + BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff), + + /* if uid is 0, use given mark(666), else use uid as the mark */ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 666), + + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, mark)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_DGRAM, + + .optname = SO_MARK, + .optval = 666, + }, + { + .descr = "AF_INET6 set mark", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* get uid of process */ + BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff), + + /* if uid is 0, use given mark(666), else use uid as the mark */ + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 666), + + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, mark)), + + /* 
return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET6, + .type = SOCK_DGRAM, + + .optname = SO_MARK, + .optval = 666, + }, + { + .descr = "AF_INET bound to iface", + .insns = { + /* r3 = 1 (lo interface) */ + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, bound_dev_if)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_DGRAM, + + .optname = SO_BINDTOIFINDEX, + .optval = 1, + }, + { + .descr = "AF_INET6 bound to iface", + .insns = { + /* r3 = 1 (lo interface) */ + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, bound_dev_if)), + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET6, + .type = SOCK_DGRAM, + + .optname = SO_BINDTOIFINDEX, + .optval = 1, + }, + { + .descr = "block AF_INET, SOCK_DGRAM, IPPROTO_ICMP socket", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */ + + /* sock->family == AF_INET */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET, 5), + + /* sock->type == SOCK_DGRAM */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, type)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3), + + /* sock->protocol == IPPROTO_ICMP */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, protocol)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMP, 1), + + /* return 0 (block) */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_DGRAM, + .protocol = IPPROTO_ICMP, + + .error = DENY_CREATE, + }, + { + .descr = "block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 socket", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */ + + /* sock->family == AF_INET6 */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET6, 5), + + /* sock->type == SOCK_DGRAM */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, type)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3), + + /* sock->protocol == IPPROTO_ICMPV6 */ + BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, + offsetof(struct bpf_sock, protocol)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMPV6, 1), + + /* return 0 (block) */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_DGRAM, + .protocol = IPPROTO_ICMPV6, + + .error = DENY_CREATE, + }, + { + .descr = "load w/o expected_attach_type (compat mode)", + .insns = { + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .expected_attach_type = 0, + .attach_type = BPF_CGROUP_INET_SOCK_CREATE, + + .domain = AF_INET, + .type = SOCK_STREAM, + }, +}; + +static int load_prog(const struct bpf_insn *insns, + enum bpf_attach_type expected_attach_type) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .expected_attach_type = expected_attach_type, + .log_level = 2, + .log_buf = 
bpf_log_buf, + .log_size = sizeof(bpf_log_buf), + ); + int fd, insns_cnt = 0; + + for (; + insns[insns_cnt].code != (BPF_JMP | BPF_EXIT); + insns_cnt++) { + } + insns_cnt++; + + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, + insns_cnt, &opts); + if (verbose && fd < 0) + fprintf(stderr, "%s\n", bpf_log_buf); + + return fd; +} + +static int run_test(int cgroup_fd, struct sock_create_test *test) +{ + int sock_fd, err, prog_fd, optval, ret = -1; + socklen_t optlen = sizeof(optval); + + prog_fd = load_prog(test->insns, test->expected_attach_type); + if (prog_fd < 0) { + log_err("Failed to load BPF program"); + return -1; + } + + err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0); + if (err < 0) { + log_err("Failed to attach BPF program"); + goto close_prog_fd; + } + + sock_fd = socket(test->domain, test->type, test->protocol); + if (sock_fd < 0) { + if (test->error == DENY_CREATE) + ret = 0; + else + log_err("Failed to create socket"); + + goto detach_prog; + } + + if (test->optname) { + err = getsockopt(sock_fd, SOL_SOCKET, test->optname, &optval, &optlen); + if (err) { + log_err("Failed to call getsockopt"); + goto cleanup; + } + + if (optval != test->optval) { + errno = 0; + log_err("getsockopt returned unexpected optval"); + goto cleanup; + } + } + + ret = test->error != OK; + +cleanup: + close(sock_fd); +detach_prog: + bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type); +close_prog_fd: + close(prog_fd); + return ret; +} + +void test_sock_create(void) +{ + int cgroup_fd, i; + + cgroup_fd = test__join_cgroup("/sock_create"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + return; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!test__start_subtest(tests[i].descr)) + continue; + + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); + } + + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c similarity index 64% rename from tools/testing/selftests/bpf/test_sock.c rename to tools/testing/selftests/bpf/prog_tests/sock_post_bind.c index 810c3740b2cc..788135c9c673 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c @@ -1,132 +1,35 @@ // SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2018 Facebook - -#include -#include - -#include -#include -#include - -#include - -#include - +#include +#include #include "cgroup_helpers.h" -#include -#include "bpf_util.h" -#define CG_PATH "/foo" -#define MAX_INSNS 512 +#define TEST_NS "sock_post_bind" -char bpf_log_buf[BPF_LOG_BUF_SIZE]; -static bool verbose = false; +static char bpf_log_buf[4096]; -struct sock_test { - const char *descr; +static struct sock_post_bind_test { + const char *descr; /* BPF prog properties */ - struct bpf_insn insns[MAX_INSNS]; - enum bpf_attach_type expected_attach_type; - enum bpf_attach_type attach_type; + const struct bpf_insn insns[64]; + enum bpf_attach_type attach_type; + enum bpf_attach_type expected_attach_type; /* Socket properties */ - int domain; - int type; + int domain; + int type; /* Endpoint to bind() to */ const char *ip; unsigned short port; unsigned short port_retry; + /* Expected test result */ enum { - LOAD_REJECT, ATTACH_REJECT, BIND_REJECT, SUCCESS, RETRY_SUCCESS, RETRY_REJECT } result; -}; - -static struct sock_test tests[] = { - { - .descr = "bind4 load with invalid access: src_ip6", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, - offsetof(struct bpf_sock, src_ip6[0])), 
- BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = LOAD_REJECT, - }, - { - .descr = "bind4 load with invalid access: mark", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, - offsetof(struct bpf_sock, mark)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = LOAD_REJECT, - }, - { - .descr = "bind6 load with invalid access: src_ip4", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, - offsetof(struct bpf_sock, src_ip4)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = BPF_CGROUP_INET6_POST_BIND, - .attach_type = BPF_CGROUP_INET6_POST_BIND, - .result = LOAD_REJECT, - }, - { - .descr = "sock_create load with invalid access: src_port", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, - offsetof(struct bpf_sock, src_port)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, - .attach_type = BPF_CGROUP_INET_SOCK_CREATE, - .result = LOAD_REJECT, - }, - { - .descr = "sock_create load w/o expected_attach_type (compat mode)", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = 0, - .attach_type = BPF_CGROUP_INET_SOCK_CREATE, - .domain = AF_INET, - .type = SOCK_STREAM, - .ip = "127.0.0.1", - .port = 8097, - .result = SUCCESS, - }, - { - .descr = "sock_create load w/ expected_attach_type", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE, - .attach_type = BPF_CGROUP_INET_SOCK_CREATE, - .domain = AF_INET, - .type = SOCK_STREAM, - .ip = "127.0.0.1", - .port = 8097, - .result = SUCCESS, - }, +} tests[] = { { .descr = "attach type mismatch bind4 vs bind6", .insns = { @@ -374,40 +277,29 @@ static struct sock_test tests[] = { }, }; -static size_t probe_prog_length(const struct bpf_insn *fp) +static int load_prog(const struct bpf_insn *insns, + enum bpf_attach_type expected_attach_type) { - size_t len; + LIBBPF_OPTS(bpf_prog_load_opts, opts, + .expected_attach_type = expected_attach_type, + .log_level = 2, + .log_buf = bpf_log_buf, + .log_size = sizeof(bpf_log_buf), + ); + int fd, insns_cnt = 0; - for (len = MAX_INSNS - 1; len > 0; --len) - if (fp[len].code != 0 || fp[len].imm != 0) - break; - return len + 1; -} + for (; + insns[insns_cnt].code != (BPF_JMP | BPF_EXIT); + insns_cnt++) { + } + insns_cnt++; -static int load_sock_prog(const struct bpf_insn *prog, - enum bpf_attach_type attach_type) -{ - LIBBPF_OPTS(bpf_prog_load_opts, opts); - int ret, insn_cnt; - - insn_cnt = probe_prog_length(prog); - - opts.expected_attach_type = attach_type; - opts.log_buf = bpf_log_buf; - opts.log_size = BPF_LOG_BUF_SIZE; - opts.log_level = 2; - - ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insn_cnt, &opts); - if (verbose && ret < 0) + fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, + insns_cnt, &opts); + if (fd < 0) fprintf(stderr, "%s\n", bpf_log_buf); - return ret; -} - -static int attach_sock_prog(int cgfd, int progfd, - enum bpf_attach_type attach_type) -{ - return bpf_prog_attach(progfd, cgfd, attach_type, BPF_F_ALLOW_OVERRIDE); + return fd; } static int bind_sock(int domain, int type, const char *ip, @@ -477,22 
+369,16 @@ static int bind_sock(int domain, int type, const char *ip, return res; } -static int run_test_case(int cgfd, const struct sock_test *test) +static int run_test(int cgroup_fd, struct sock_post_bind_test *test) { - int progfd = -1; - int err = 0; - int res; + int err, prog_fd, res, ret = 0; - printf("Test case: %s .. ", test->descr); - progfd = load_sock_prog(test->insns, test->expected_attach_type); - if (progfd < 0) { - if (test->result == LOAD_REJECT) - goto out; - else - goto err; - } + prog_fd = load_prog(test->insns, test->expected_attach_type); + if (prog_fd < 0) + goto err; - if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) { + err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0); + if (err < 0) { if (test->result == ATTACH_REJECT) goto out; else @@ -503,54 +389,38 @@ static int run_test_case(int cgfd, const struct sock_test *test) test->port_retry); if (res > 0 && test->result == res) goto out; - err: - err = -1; + ret = -1; out: /* Detaching w/o checking return code: best effort attempt. */ - if (progfd != -1) - bpf_prog_detach(cgfd, test->attach_type); - close(progfd); - printf("[%s]\n", err ? "FAIL" : "PASS"); - return err; + if (prog_fd != -1) + bpf_prog_detach(cgroup_fd, test->attach_type); + close(prog_fd); + return ret; } -static int run_tests(int cgfd) +void test_sock_post_bind(void) { - int passes = 0; - int fails = 0; + struct netns_obj *ns; + int cgroup_fd; int i; - for (i = 0; i < ARRAY_SIZE(tests); ++i) { - if (run_test_case(cgfd, &tests[i])) - ++fails; - else - ++passes; + cgroup_fd = test__join_cgroup("/post_bind"); + if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup")) + return; + + ns = netns_new(TEST_NS, true); + if (!ASSERT_OK_PTR(ns, "netns_new")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!test__start_subtest(tests[i].descr)) + continue; + + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); } - printf("Summary: %d PASSED, %d FAILED\n", passes, fails); - return fails ? 
-1 : 0; -} - -int main(int argc, char **argv) -{ - int cgfd = -1; - int err = 0; - - cgfd = cgroup_setup_and_join(CG_PATH); - if (cgfd < 0) - goto err; - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - if (run_tests(cgfd)) - goto err; - - goto out; -err: - err = -1; -out: - close(cgfd); - cleanup_cgroup_environment(); - return err; + +cleanup: + netns_free(ns); + close(cgroup_fd); } diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c new file mode 100644 index 000000000000..4006879ca3fe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "struct_ops_private_stack.skel.h" +#include "struct_ops_private_stack_fail.skel.h" +#include "struct_ops_private_stack_recur.skel.h" + +static void test_private_stack(void) +{ + struct struct_ops_private_stack *skel; + struct bpf_link *link; + int err; + + skel = struct_ops_private_stack__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack__load(skel); + if (!ASSERT_OK(err, "struct_ops_private_stack__load")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->val_i, 3, "val_i"); + ASSERT_EQ(skel->bss->val_j, 8, "val_j"); + + bpf_link__destroy(link); + +cleanup: + struct_ops_private_stack__destroy(skel); +} + +static void test_private_stack_fail(void) +{ + struct struct_ops_private_stack_fail *skel; + int err; + + skel = struct_ops_private_stack_fail__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_fail__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack_fail__load(skel); + if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load")) + goto cleanup; + return; + +cleanup: + struct_ops_private_stack_fail__destroy(skel); +} + +static void test_private_stack_recur(void) +{ + struct struct_ops_private_stack_recur *skel; + struct bpf_link *link; + int err; + + skel = struct_ops_private_stack_recur__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_recur__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack_recur__load(skel); + if (!ASSERT_OK(err, "struct_ops_private_stack_recur__load")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->val_j, 3, "val_j"); + + bpf_link__destroy(link); + +cleanup: + struct_ops_private_stack_recur__destroy(skel); +} + +void test_struct_ops_private_stack(void) +{ + if (test__start_subtest("private_stack")) + test_private_stack(); + if (test__start_subtest("private_stack_fail")) + test_private_stack_fail(); + if (test__start_subtest("private_stack_recur")) + test_private_stack_recur(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c index 9c31b7004f9c..fdf13ed0152a 100644 --- a/tools/testing/selftests/bpf/prog_tests/subskeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -46,7 
+46,8 @@ static int subskeleton_lib_subresult(struct bpf_object *obj) return result; } -void test_subskeleton(void) +/* initialize and load through skeleton, then instantiate subskeleton out of it */ +static void subtest_skel_subskeleton(void) { int err, result; struct test_subskeleton *skel; @@ -76,3 +77,76 @@ void test_subskeleton(void) cleanup: test_subskeleton__destroy(skel); } + +/* initialize and load through generic bpf_object API, then instantiate subskeleton out of it */ +static void subtest_obj_subskeleton(void) +{ + int err, result; + const void *elf_bytes; + size_t elf_bytes_sz = 0, rodata_sz = 0, bss_sz = 0; + struct bpf_object *obj; + const struct bpf_map *map; + const struct bpf_program *prog; + struct bpf_link *link = NULL; + struct test_subskeleton__rodata *rodata; + struct test_subskeleton__bss *bss; + + elf_bytes = test_subskeleton__elf_bytes(&elf_bytes_sz); + if (!ASSERT_OK_PTR(elf_bytes, "elf_bytes")) + return; + + obj = bpf_object__open_mem(elf_bytes, elf_bytes_sz, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_mem")) + return; + + map = bpf_object__find_map_by_name(obj, ".rodata"); + if (!ASSERT_OK_PTR(map, "rodata_map_by_name")) + goto cleanup; + + rodata = bpf_map__initial_value(map, &rodata_sz); + if (!ASSERT_OK_PTR(rodata, "rodata_get")) + goto cleanup; + + rodata->rovar1 = 10; + rodata->var1 = 1; + subskeleton_lib_setup(obj); + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(obj, "handler1"); + if (!ASSERT_OK_PTR(prog, "prog_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + map = bpf_object__find_map_by_name(obj, ".bss"); + if (!ASSERT_OK_PTR(map, "bss_map_by_name")) + goto cleanup; + + bss = bpf_map__initial_value(map, &bss_sz); + if (!ASSERT_OK_PTR(bss, "bss_get")) + goto cleanup; + + result = subskeleton_lib_subresult(obj) * 10; + ASSERT_EQ(bss->out1, result, "out1"); + +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); +} + + +void test_subskeleton(void) +{ + if (test__start_subtest("skel_subskel")) + subtest_skel_subskeleton(); + if (test__start_subtest("obj_subskel")) + subtest_obj_subskeleton(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 21c5a37846ad..544144620ca6 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -7,6 +7,7 @@ #include "tailcall_bpf2bpf_hierarchy3.skel.h" #include "tailcall_freplace.skel.h" #include "tc_bpf2bpf.skel.h" +#include "tailcall_fail.skel.h" /* test_tailcall_1 checks basic functionality by patching multiple locations * in a single program for a single tail call slot with nop->jmp, jmp->nop @@ -1496,8 +1497,8 @@ static void test_tailcall_bpf2bpf_hierarchy_3(void) RUN_TESTS(tailcall_bpf2bpf_hierarchy3); } -/* test_tailcall_freplace checks that the attached freplace prog is OK to - * update the prog_array map. +/* test_tailcall_freplace checks that the freplace prog fails to update the + * prog_array map, no matter whether the freplace prog attaches to its target. 
 */
static void test_tailcall_freplace(void)
{
@@ -1505,7 +1506,7 @@ static void test_tailcall_freplace(void)
 	struct bpf_link *freplace_link = NULL;
 	struct bpf_program *freplace_prog;
 	struct tc_bpf2bpf *tc_skel = NULL;
-	int prog_fd, map_fd;
+	int prog_fd, tc_prog_fd, map_fd;
 	char buff[128] = {};
 	int err, key;
 
@@ -1523,9 +1524,10 @@ static void test_tailcall_freplace(void)
 	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
 		goto out;
 
-	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
 	freplace_prog = freplace_skel->progs.entry_freplace;
-	err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog");
+	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
+					     "subprog_tc");
 	if (!ASSERT_OK(err, "set_attach_target"))
 		goto out;
 
@@ -1533,27 +1535,121 @@ static void test_tailcall_freplace(void)
 	if (!ASSERT_OK(err, "tailcall_freplace__load"))
 		goto out;
 
-	freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd,
-						     "subprog");
-	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
-		goto out;
-
 	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
 	prog_fd = bpf_program__fd(freplace_prog);
 	key = 0;
 	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
-	if (!ASSERT_OK(err, "update jmp_table"))
+	ASSERT_ERR(err, "update jmp_table failure");
+
+	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
+						     "subprog_tc");
+	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
 		goto out;
 
-	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
-	err = bpf_prog_test_run_opts(prog_fd, &topts);
-	ASSERT_OK(err, "test_run");
-	ASSERT_EQ(topts.retval, 34, "test_run retval");
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "update jmp_table failure");
 
 out:
 	bpf_link__destroy(freplace_link);
-	tc_bpf2bpf__destroy(tc_skel);
 	tailcall_freplace__destroy(freplace_skel);
+	tc_bpf2bpf__destroy(tc_skel);
+}
+
+/* test_tailcall_bpf2bpf_freplace checks that attaching a freplace prog to a
+ * prog that is already a tail callee fails, and that adding a prog that is
+ * already extended by a freplace prog to a prog_array map fails too.
+ */
+static void test_tailcall_bpf2bpf_freplace(void)
+{
+	struct tailcall_freplace *freplace_skel = NULL;
+	struct bpf_link *freplace_link = NULL;
+	struct tc_bpf2bpf *tc_skel = NULL;
+	char buff[128] = {};
+	int prog_fd, map_fd;
+	int err, key;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = buff,
+		    .data_size_in = sizeof(buff),
+		    .repeat = 1,
+	);
+
+	tc_skel = tc_bpf2bpf__open_and_load();
+	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+	freplace_skel = tailcall_freplace__open();
+	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+		goto out;
+
+	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
+					     prog_fd, "subprog_tc");
+	if (!ASSERT_OK(err, "set_attach_target"))
+		goto out;
+
+	err = tailcall_freplace__load(freplace_skel);
+	if (!ASSERT_OK(err, "tailcall_freplace__load"))
+		goto out;
+
+	/* OK to attach then detach freplace prog. */
+
+	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+						     prog_fd, "subprog_tc");
+	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+		goto out;
+
+	err = bpf_link__destroy(freplace_link);
+	if (!ASSERT_OK(err, "destroy link"))
+		goto out;
+
+	/* OK to update prog_array map then delete element from the map.
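+	 * The freplace prog has just been detached, so entry_tc is not
+	 * extended at this point and may still be used as a tail callee.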
*/ + + key = 0; + map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + err = bpf_map_delete_elem(map_fd, &key); + if (!ASSERT_OK(err, "delete_elem from jmp_table")) + goto out; + + /* Fail to attach a tail callee prog with freplace prog. */ + + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure")) + goto out; + + err = bpf_map_delete_elem(map_fd, &key); + if (!ASSERT_OK(err, "delete_elem from jmp_table")) + goto out; + + /* Fail to update an extended prog to prog_array map. */ + + freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) + goto out; + + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_ERR(err, "update jmp_table failure")) + goto out; + +out: + bpf_link__destroy(freplace_link); + tailcall_freplace__destroy(freplace_skel); + tc_bpf2bpf__destroy(tc_skel); +} + +static void test_tailcall_failure() +{ + RUN_TESTS(tailcall_fail); } void test_tailcalls(void) @@ -1606,4 +1702,8 @@ void test_tailcalls(void) test_tailcall_bpf2bpf_hierarchy_3(); if (test__start_subtest("tailcall_freplace")) test_tailcall_freplace(); + if (test__start_subtest("tailcall_bpf2bpf_freplace")) + test_tailcall_bpf2bpf_freplace(); + if (test__start_subtest("tailcall_failure")) + test_tailcall_failure(); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index d4579f735398..83b90335967a 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -68,6 +68,74 @@ static void run_success_test(const char *prog_name) task_kfunc_success__destroy(skel); } +static int run_vpid_test(void *prog_name) +{ + struct task_kfunc_success *skel; + struct bpf_program *prog; + int prog_fd, err = 0; + + if (getpid() != 1) + return 1; + + skel = open_load_task_kfunc_skel(); + if (!skel) + return 2; + + if (skel->bss->err) { + err = 3; + goto cleanup; + } + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!prog) { + err = 4; + goto cleanup; + } + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + err = 5; + goto cleanup; + } + + if (bpf_prog_test_run_opts(prog_fd, NULL)) { + err = 6; + goto cleanup; + } + + if (skel->bss->err) + err = 7 + skel->bss->err; +cleanup: + task_kfunc_success__destroy(skel); + return err; +} + +static void run_vpid_success_test(const char *prog_name) +{ + const int stack_size = 1024 * 1024; + int child_pid, wstatus; + char *stack; + + stack = (char *)malloc(stack_size); + if (!ASSERT_OK_PTR(stack, "clone_stack")) + return; + + child_pid = clone(run_vpid_test, stack + stack_size, + CLONE_NEWPID | SIGCHLD, (void *)prog_name); + if (!ASSERT_GT(child_pid, -1, "child_pid")) + goto cleanup; + + if (!ASSERT_GT(waitpid(child_pid, &wstatus, 0), -1, "waitpid")) + goto cleanup; + + if (WEXITSTATUS(wstatus) > 7) + ASSERT_OK(WEXITSTATUS(wstatus) - 7, "vpid_test_failure"); + else + ASSERT_OK(WEXITSTATUS(wstatus), "run_vpid_test_err"); +cleanup: + free(stack); +} + static const char * const success_tests[] = { "test_task_acquire_release_argument", 
"test_task_acquire_release_current", @@ -83,6 +151,11 @@ static const char * const success_tests[] = { "test_task_kfunc_flavor_relo_not_found", }; +static const char * const vpid_success_tests[] = { + "test_task_from_vpid_current", + "test_task_from_vpid_invalid", +}; + void test_task_kfunc(void) { int i; @@ -94,5 +167,12 @@ void test_task_kfunc(void) run_success_test(success_tests[i]); } + for (i = 0; i < ARRAY_SIZE(vpid_success_tests); i++) { + if (!test__start_subtest(vpid_success_tests[i])) + continue; + + run_vpid_success_test(vpid_success_tests[i]); + } + RUN_TESTS(task_kfunc_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index c33c05161a9e..60f474d965a9 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -7,12 +7,20 @@ #include #include /* For SYS_xxx definitions */ #include +#include +#include #include +#include #include "task_local_storage_helpers.h" #include "task_local_storage.skel.h" #include "task_local_storage_exit_creds.skel.h" #include "task_ls_recursion.skel.h" #include "task_storage_nodeadlock.skel.h" +#include "uptr_test_common.h" +#include "task_ls_uptr.skel.h" +#include "uptr_update_failure.skel.h" +#include "uptr_failure.skel.h" +#include "uptr_map_failure.skel.h" static void test_sys_enter_exit(void) { @@ -23,14 +31,14 @@ static void test_sys_enter_exit(void) if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); err = task_local_storage__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; - syscall(SYS_gettid); - syscall(SYS_gettid); + sys_gettid(); + sys_gettid(); /* 3x syscalls: 1x attach and 2x gettid */ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); @@ -99,7 +107,7 @@ static void test_recursion(void) /* trigger sys_enter, make sure it does not cause deadlock */ skel->bss->test_pid = getpid(); - syscall(SYS_gettid); + sys_gettid(); skel->bss->test_pid = 0; task_ls_recursion__detach(skel); @@ -227,6 +235,259 @@ static void test_nodeadlock(void) sched_setaffinity(getpid(), sizeof(old), &old); } +static struct user_data udata __attribute__((aligned(16))) = { + .a = 1, + .b = 2, +}; + +static struct user_data udata2 __attribute__((aligned(16))) = { + .a = 3, + .b = 4, +}; + +static void check_udata2(int expected) +{ + udata2.result = udata2.nested_result = 0; + usleep(1); + ASSERT_EQ(udata2.result, expected, "udata2.result"); + ASSERT_EQ(udata2.nested_result, expected, "udata2.nested_result"); +} + +static void test_uptr_basic(void) +{ + int map_fd, parent_task_fd, ev_fd; + struct value_type value = {}; + struct task_ls_uptr *skel; + pid_t child_pid, my_tid; + __u64 ev_dummy_data = 1; + int err; + + my_tid = sys_gettid(); + parent_task_fd = sys_pidfd_open(my_tid, 0); + if (!ASSERT_OK_FD(parent_task_fd, "parent_task_fd")) + return; + + ev_fd = eventfd(0, 0); + if (!ASSERT_OK_FD(ev_fd, "ev_fd")) { + close(parent_task_fd); + return; + } + + skel = task_ls_uptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + value.udata = &udata; + value.nested.udata = &udata; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST); + if (!ASSERT_OK(err, "update_elem(udata)")) + goto out; + + err = task_ls_uptr__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + child_pid = fork(); + if 
(!ASSERT_NEQ(child_pid, -1, "fork")) + goto out; + + /* Call syscall in the child process, but access the map value of + * the parent process in the BPF program to check if the user kptr + * is translated/mapped correctly. + */ + if (child_pid == 0) { + /* child */ + + /* Overwrite the user_data in the child process to check if + * the BPF program accesses the user_data of the parent. + */ + udata.a = 0; + udata.b = 0; + + /* Wait for the parent to set child_pid */ + read(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data)); + exit(0); + } + + skel->bss->parent_pid = my_tid; + skel->bss->target_pid = child_pid; + + write(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data)); + + err = waitpid(child_pid, NULL, 0); + ASSERT_EQ(err, child_pid, "waitpid"); + ASSERT_EQ(udata.result, MAGIC_VALUE + udata.a + udata.b, "udata.result"); + ASSERT_EQ(udata.nested_result, MAGIC_VALUE + udata.a + udata.b, "udata.nested_result"); + + skel->bss->target_pid = my_tid; + + /* update_elem: uptr changes from udata1 to udata2 */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(MAGIC_VALUE + udata2.a + udata2.b); + + /* update_elem: uptr changes from udata2 uptr to NULL */ + memset(&value, 0, sizeof(value)); + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(0); + + /* update_elem: uptr changes from NULL to udata2 */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(MAGIC_VALUE + udata2.a + udata2.b); + + /* Check if user programs can access the value of user kptrs + * through bpf_map_lookup_elem(). Make sure the kernel value is not + * leaked. 
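+	 * The uptr fields must read back as NULL so the kernel-side mapping
+	 * of the user page is never exposed to user space.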
+ */ + err = bpf_map_lookup_elem(map_fd, &parent_task_fd, &value); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + goto out; + ASSERT_EQ(value.udata, NULL, "value.udata"); + ASSERT_EQ(value.nested.udata, NULL, "value.nested.udata"); + + /* delete_elem */ + err = bpf_map_delete_elem(map_fd, &parent_task_fd); + ASSERT_OK(err, "delete_elem(udata2)"); + check_udata2(0); + + /* update_elem: add uptr back to test map_free */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST); + ASSERT_OK(err, "update_elem(udata2)"); + +out: + task_ls_uptr__destroy(skel); + close(ev_fd); + close(parent_task_fd); +} + +static void test_uptr_across_pages(void) +{ + int page_size = getpagesize(); + struct value_type value = {}; + struct task_ls_uptr *skel; + int err, task_fd, map_fd; + void *mem; + + task_fd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_OK_FD(task_fd, "task_fd")) + return; + + mem = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (!ASSERT_OK_PTR(mem, "mmap(page_size * 2)")) { + close(task_fd); + return; + } + + skel = task_ls_uptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + value.udata = mem + page_size - offsetof(struct user_data, b); + err = bpf_map_update_elem(map_fd, &task_fd, &value, 0); + if (!ASSERT_ERR(err, "update_elem(udata)")) + goto out; + ASSERT_EQ(errno, EOPNOTSUPP, "errno"); + + value.udata = mem + page_size - sizeof(struct user_data); + err = bpf_map_update_elem(map_fd, &task_fd, &value, 0); + ASSERT_OK(err, "update_elem(udata)"); + +out: + task_ls_uptr__destroy(skel); + close(task_fd); + munmap(mem, page_size * 2); +} + +static void test_uptr_update_failure(void) +{ + struct value_lock_type value = {}; + struct uptr_update_failure *skel; + int err, task_fd, map_fd; + + task_fd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_OK_FD(task_fd, "task_fd")) + return; + + skel = uptr_update_failure__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + + value.udata = &udata; + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_F_LOCK); + if (!ASSERT_ERR(err, "update_elem(udata, BPF_F_LOCK)")) + goto out; + ASSERT_EQ(errno, EOPNOTSUPP, "errno"); + + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_EXIST); + if (!ASSERT_ERR(err, "update_elem(udata, BPF_EXIST)")) + goto out; + ASSERT_EQ(errno, ENOENT, "errno"); + + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST); + if (!ASSERT_OK(err, "update_elem(udata, BPF_NOEXIST)")) + goto out; + + value.udata = &udata2; + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST); + if (!ASSERT_ERR(err, "update_elem(udata2, BPF_NOEXIST)")) + goto out; + ASSERT_EQ(errno, EEXIST, "errno"); + +out: + uptr_update_failure__destroy(skel); + close(task_fd); +} + +static void test_uptr_map_failure(const char *map_name, int expected_errno) +{ + LIBBPF_OPTS(bpf_map_create_opts, create_attr); + struct uptr_map_failure *skel; + struct bpf_map *map; + struct btf *btf; + int map_fd, err; + + skel = uptr_map_failure__open(); + if (!ASSERT_OK_PTR(skel, "uptr_map_failure__open")) + return; + + map = bpf_object__find_map_by_name(skel->obj, map_name); + btf = bpf_object__btf(skel->obj); + err = btf__load_into_kernel(btf); + if (!ASSERT_OK(err, "btf__load_into_kernel")) + goto done; + + create_attr.map_flags = bpf_map__map_flags(map); + create_attr.btf_fd = 
btf__fd(btf); + create_attr.btf_key_type_id = bpf_map__btf_key_type_id(map); + create_attr.btf_value_type_id = bpf_map__btf_value_type_id(map); + map_fd = bpf_map_create(bpf_map__type(map), map_name, + bpf_map__key_size(map), bpf_map__value_size(map), + 0, &create_attr); + if (ASSERT_ERR_FD(map_fd, "map_create")) + ASSERT_EQ(errno, expected_errno, "errno"); + else + close(map_fd); + +done: + uptr_map_failure__destroy(skel); +} + void test_task_local_storage(void) { if (test__start_subtest("sys_enter_exit")) @@ -237,4 +498,21 @@ void test_task_local_storage(void) test_recursion(); if (test__start_subtest("nodeadlock")) test_nodeadlock(); + if (test__start_subtest("uptr_basic")) + test_uptr_basic(); + if (test__start_subtest("uptr_across_pages")) + test_uptr_across_pages(); + if (test__start_subtest("uptr_update_failure")) + test_uptr_update_failure(); + if (test__start_subtest("uptr_map_failure_e2big")) { + if (getpagesize() == PAGE_SIZE) + test_uptr_map_failure("large_uptr_map", E2BIG); + else + test__skip(); + } + if (test__start_subtest("uptr_map_failure_size0")) + test_uptr_map_failure("empty_uptr_map", EINVAL); + if (test__start_subtest("uptr_map_failure_kstruct")) + test_uptr_map_failure("kstruct_uptr_map", EINVAL); + RUN_TESTS(uptr_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c index 871d16cb95cf..1a2f99596916 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "timer_lockup.skel.h" @@ -52,6 +53,11 @@ void test_timer_lockup(void) pthread_t thrds[2]; void *ret; + if (get_nprocs() < 2) { + test__skip(); + return; + } + skel = timer_lockup__open_and_load(); if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c index fe86e4fdb89c..c3ab9b6fb069 100644 --- a/tools/testing/selftests/bpf/prog_tests/token.c +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -828,8 +828,12 @@ static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel) return validate_struct_ops_load(mnt_fd, true /* should succeed */); } +static const char *token_bpffs_custom_dir() +{ + return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs"; +} + #define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH" -#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs" static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel) { @@ -892,6 +896,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel) { + const char *custom_dir = token_bpffs_custom_dir(); LIBBPF_OPTS(bpf_object_open_opts, opts); struct dummy_st_ops_success *skel; int err; @@ -909,10 +914,10 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l * BPF token implicitly, unless pointed to it through * LIBBPF_BPF_TOKEN_PATH envvar */ - rmdir(TOKEN_BPFFS_CUSTOM); - if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom")) + rmdir(custom_dir); + if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom")) goto err_out; - err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH); + err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH); if (!ASSERT_OK(err, "move_mount_bpffs")) goto err_out; @@ 
-925,7 +930,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l goto err_out; } - err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/); + err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/); if (!ASSERT_OK(err, "setenv_token_path")) goto err_out; @@ -951,11 +956,11 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l if (!ASSERT_ERR(err, "obj_empty_token_path_load")) goto err_out; - rmdir(TOKEN_BPFFS_CUSTOM); + rmdir(custom_dir); unsetenv(TOKEN_ENVVAR); return 0; err_out: - rmdir(TOKEN_BPFFS_CUSTOM); + rmdir(custom_dir); unsetenv(TOKEN_ENVVAR); return -EINVAL; } diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 844f6fc8487b..2ee17ef1dae2 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -8,6 +8,11 @@ #include "uprobe_multi_usdt.skel.h" #include "uprobe_multi_consumers.skel.h" #include "uprobe_multi_pid_filter.skel.h" +#include "uprobe_multi_session.skel.h" +#include "uprobe_multi_session_single.skel.h" +#include "uprobe_multi_session_cookie.skel.h" +#include "uprobe_multi_session_recursive.skel.h" +#include "uprobe_multi_verifier.skel.h" #include "bpf/libbpf_internal.h" #include "testing_helpers.h" #include "../sdt.h" @@ -34,6 +39,12 @@ noinline void usdt_trigger(void) STAP_PROBE(test, pid_filter_usdt); } +noinline void uprobe_session_recursive(int i) +{ + if (i) + uprobe_session_recursive(i - 1); +} + struct child { int go[2]; int c2p[2]; /* child -> parent channel */ @@ -125,7 +136,7 @@ static void *child_thread(void *ctx) struct child *child = ctx; int c = 0, err; - child->tid = syscall(SYS_gettid); + child->tid = sys_gettid(); /* let parent know we are ready */ err = write(child->c2p[1], &c, 1); @@ -778,7 +789,7 @@ get_link(struct uprobe_multi_consumers *skel, int link) } } -static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) +static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset) { struct bpf_program *prog = get_program(skel, idx); struct bpf_link **link = get_link(skel, idx); @@ -787,15 +798,19 @@ static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) if (!prog || !link) return -1; - /* - * bit/prog: 0,1 uprobe entry - * bit/prog: 2,3 uprobe return - */ - opts.retprobe = idx == 2 || idx == 3; + opts.offsets = &offset; + opts.cnt = 1; - *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", - "uprobe_consumer_test", - &opts); + /* + * bit/prog: 0 uprobe entry + * bit/prog: 1 uprobe return + * bit/prog: 2 uprobe session without return + * bit/prog: 3 uprobe session with return + */ + opts.retprobe = idx == 1; + opts.session = idx == 2 || idx == 3; + + *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts); if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi")) return -1; return 0; @@ -816,7 +831,8 @@ static bool test_bit(int bit, unsigned long val) noinline int uprobe_consumer_test(struct uprobe_multi_consumers *skel, - unsigned long before, unsigned long after) + unsigned long before, unsigned long after, + unsigned long offset) { int idx; @@ -829,89 +845,174 @@ uprobe_consumer_test(struct uprobe_multi_consumers *skel, /* ... 
and attach all new programs in 'after' state */ for (idx = 0; idx < 4; idx++) { if (!test_bit(idx, before) && test_bit(idx, after)) { - if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after")) + if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after")) return -1; } } return 0; } -static void consumer_test(struct uprobe_multi_consumers *skel, - unsigned long before, unsigned long after) +/* + * We generate 16 consumer_testX functions that will have uprobe installed on + * and will be called in separate threads. All function pointer are stored in + * "consumers" section and each thread will pick one function based on index. + */ + +extern const void *__start_consumers; + +#define __CONSUMER_TEST(func) \ +noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \ + unsigned long after, unsigned long offset) \ +{ \ + return uprobe_consumer_test(skel, before, after, offset); \ +} \ +void *__ ## func __used __attribute__((section("consumers"))) = (void *) func; + +#define CONSUMER_TEST(func) __CONSUMER_TEST(func) + +#define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__)) +#define C4 C1 C1 C1 C1 +#define C16 C4 C4 C4 C4 + +C16 + +typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long, + unsigned long, unsigned long); + +static int consumer_test(struct uprobe_multi_consumers *skel, + unsigned long before, unsigned long after, + test_t test, unsigned long offset) { - int err, idx; + int err, idx, ret = -1; printf("consumer_test before %lu after %lu\n", before, after); /* 'before' is each, we attach uprobe for every set idx */ for (idx = 0; idx < 4; idx++) { if (test_bit(idx, before)) { - if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before")) + if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before")) goto cleanup; } } - err = uprobe_consumer_test(skel, before, after); + err = test(skel, before, after, offset); if (!ASSERT_EQ(err, 0, "uprobe_consumer_test")) goto cleanup; for (idx = 0; idx < 4; idx++) { + bool uret_stays, uret_survives; const char *fmt = "BUG"; __u64 val = 0; - if (idx < 2) { + switch (idx) { + case 0: /* * uprobe entry * +1 if define in 'before' */ if (test_bit(idx, before)) val++; - fmt = "prog 0/1: uprobe"; - } else { + fmt = "prog 0: uprobe"; + break; + case 1: /* - * uprobe return is tricky ;-) - * - * to trigger uretprobe consumer, the uretprobe needs to be installed, - * which means one of the 'return' uprobes was alive when probe was hit: - * - * idxs: 2/3 uprobe return in 'installed' mask - * - * in addition if 'after' state removes everything that was installed in - * 'before' state, then uprobe kernel object goes away and return uprobe - * is not installed and we won't hit it even if it's in 'after' state. 
+ * To trigger uretprobe consumer, the uretprobe under test either stayed from + * before to after (uret_stays + test_bit) or uretprobe instance survived and + * we have uretprobe active in after (uret_survives + test_bit) */ - unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */ - unsigned long probe_preserved = before & after; /* did uprobe go away */ + uret_stays = before & after & 0b0110; + uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001)); - if (had_uretprobes && probe_preserved && test_bit(idx, after)) + if ((uret_stays || uret_survives) && test_bit(idx, after)) val++; - fmt = "idx 2/3: uretprobe"; + fmt = "prog 1: uretprobe"; + break; + case 2: + /* + * session with return + * +1 if defined in 'before' + * +1 if defined in 'after' + */ + if (test_bit(idx, before)) { + val++; + if (test_bit(idx, after)) + val++; + } + fmt = "prog 2: session with return"; + break; + case 3: + /* + * session without return + * +1 if defined in 'before' + */ + if (test_bit(idx, before)) + val++; + fmt = "prog 3: session with NO return"; + break; } - ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt); + if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt)) + goto cleanup; skel->bss->uprobe_result[idx] = 0; } + ret = 0; + cleanup: for (idx = 0; idx < 4; idx++) uprobe_detach(skel, idx); + return ret; } -static void test_consumers(void) +#define CONSUMER_MAX 16 + +/* + * Each thread runs 1/16 of the load by running test for single + * 'before' number (based on thread index) and full scale of + * 'after' numbers. + */ +static void *consumer_thread(void *arg) { + unsigned long idx = (unsigned long) arg; struct uprobe_multi_consumers *skel; - int before, after; + unsigned long offset; + const void *func; + int after; skel = uprobe_multi_consumers__open_and_load(); if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load")) - return; + return NULL; + + func = *((&__start_consumers) + idx); + + offset = get_uprobe_offset(func); + if (!ASSERT_GE(offset, 0, "uprobe_offset")) + goto out; + + for (after = 0; after < CONSUMER_MAX; after++) + if (consumer_test(skel, idx, after, func, offset)) + goto out; + +out: + uprobe_multi_consumers__destroy(skel); + return NULL; +} + + +static void test_consumers(void) +{ + pthread_t pt[CONSUMER_MAX]; + unsigned long idx; + int err; /* * The idea of this test is to try all possible combinations of * uprobes consumers attached on single function. * - * - 2 uprobe entry consumer - * - 2 uprobe exit consumers + * - 1 uprobe entry consumer + * - 1 uprobe exit consumer + * - 1 uprobe session with return + * - 1 uprobe session without return * * The test uses 4 uprobes attached on single function, but that * translates into single uprobe with 4 consumers in kernel. 
@@ -919,37 +1020,38 @@ static void test_consumers(void) * The before/after values present the state of attached consumers * before and after the probed function: * - * bit/prog 0,1 : uprobe entry - * bit/prog 2,3 : uprobe return + * bit/prog 0 : uprobe entry + * bit/prog 1 : uprobe return * * For example for: * - * before = 0b0101 - * after = 0b0110 + * before = 0b01 + * after = 0b10 * * it means that before we call 'uprobe_consumer_test' we attach * uprobes defined in 'before' value: * - * - bit/prog 0: uprobe entry - * - bit/prog 2: uprobe return + * - bit/prog 1: uprobe entry * * uprobe_consumer_test is called and inside it we attach and detach * uprobes based on 'after' value: * - * - bit/prog 0: stays untouched - * - bit/prog 2: uprobe return is detached + * - bit/prog 0: is detached + * - bit/prog 1: is attached * * uprobe_consumer_test returns and we check counters values increased * by bpf programs on each uprobe to match the expected count based on * before/after bits. */ - for (before = 0; before < 16; before++) { - for (after = 0; after < 16; after++) - consumer_test(skel, before, after); + for (idx = 0; idx < CONSUMER_MAX; idx++) { + err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx); + if (!ASSERT_OK(err, "pthread_create")) + break; } - uprobe_multi_consumers__destroy(skel); + while (idx) + pthread_join(pt[--idx], NULL); } static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx) @@ -1016,6 +1118,156 @@ static void test_pid_filter_process(bool clone_vm) uprobe_multi_pid_filter__destroy(skel); } +static void test_session_skel_api(void) +{ + struct uprobe_multi_session *skel = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct bpf_link *link = NULL; + int err; + + skel = uprobe_multi_session__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->user_ptr = test_data; + + err = uprobe_multi_session__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session__attach")) + goto cleanup; + + /* trigger all probes */ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + /* + * We expect 2 for uprobe_multi_func_2 because it runs both entry/return probe, + * uprobe_multi_func_[13] run just the entry probe. All expected numbers are + * doubled, because we run extra test for sleepable session. 
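+	 * That is 2 for uprobe_multi_func_1 and uprobe_multi_func_3 (entry
+	 * only) and 4 for uprobe_multi_func_2 (entry + return).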
+ */ + ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result"); + + /* We expect increase in 3 entry and 1 return session calls -> 4 */ + ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result"); + +cleanup: + bpf_link__destroy(link); + uprobe_multi_session__destroy(skel); +} + +static void test_session_single_skel_api(void) +{ + struct uprobe_multi_session_single *skel = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + int err; + + skel = uprobe_multi_session_single__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_single__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_single__attach")) + goto cleanup; + + uprobe_multi_func_1(); + + /* + * We expect consumer 0 and 2 to trigger just entry handler (value 1) + * and consumer 1 to hit both (value 2). + */ + ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0"); + ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1"); + ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2"); + +cleanup: + uprobe_multi_session_single__destroy(skel); +} + +static void test_session_cookie_skel_api(void) +{ + struct uprobe_multi_session_cookie *skel = NULL; + int err; + + skel = uprobe_multi_session_cookie__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_cookie__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach")) + goto cleanup; + + /* trigger all probes */ + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result"); + ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result"); + ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result"); + +cleanup: + uprobe_multi_session_cookie__destroy(skel); +} + +static void test_session_recursive_skel_api(void) +{ + struct uprobe_multi_session_recursive *skel = NULL; + int i, err; + + skel = uprobe_multi_session_recursive__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_recursive__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++) + skel->bss->test_uprobe_cookie_entry[i] = i + 1; + + uprobe_session_recursive(5); + + /* + * entry uprobe: + * uprobe_session_recursive(5) { *cookie = 1, return 0 + * uprobe_session_recursive(4) { *cookie = 2, return 1 + * uprobe_session_recursive(3) { *cookie = 3, return 0 + * uprobe_session_recursive(2) { *cookie = 4, return 1 + * uprobe_session_recursive(1) { *cookie = 5, return 0 + * uprobe_session_recursive(0) { *cookie = 6, return 1 + * return uprobe: + * } i = 0 not executed + * } i = 1 test_uprobe_cookie_return[0] = 5 + * } i = 2 not executed + * } i = 3 test_uprobe_cookie_return[1] = 3 + * } i = 4 not executed + * } i = 5 test_uprobe_cookie_return[2] = 1 + */ + + ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry"); + ASSERT_EQ(skel->bss->idx_return, 3, "idx_return"); + + 
ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]"); + ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]"); + ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]"); + +cleanup: + uprobe_multi_session_recursive__destroy(skel); +} + static void test_bench_attach_uprobe(void) { long attach_start_ns = 0, attach_end_ns = 0; @@ -1112,4 +1364,13 @@ void test_uprobe_multi_test(void) test_pid_filter_process(false); if (test__start_subtest("filter_clone_vm")) test_pid_filter_process(true); + if (test__start_subtest("session")) + test_session_skel_api(); + if (test__start_subtest("session_single")) + test_session_single_skel_api(); + if (test__start_subtest("session_cookie")) + test_session_cookie_skel_api(); + if (test__start_subtest("session_cookie_recursive")) + test_session_recursive_skel_api(); + RUN_TESTS(uprobe_multi_verifier); } diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 75f7a2ce334b..d9f65adb456b 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -61,6 +61,7 @@ #include "verifier_or_jmp32_k.skel.h" #include "verifier_precision.skel.h" #include "verifier_prevent_map_lookup.skel.h" +#include "verifier_private_stack.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" @@ -188,6 +189,7 @@ void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); } void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); } void test_verifier_precision(void) { RUN(verifier_precision); } void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } +void test_verifier_private_stack(void) { RUN(verifier_private_stack); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h deleted file mode 100644 index c41ee80533ca..000000000000 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ /dev/null @@ -1,167 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used -#define bpf_iter__netlink bpf_iter__netlink___not_used -#define bpf_iter__task bpf_iter__task___not_used -#define bpf_iter__task_file bpf_iter__task_file___not_used -#define bpf_iter__task_vma bpf_iter__task_vma___not_used -#define bpf_iter__tcp bpf_iter__tcp___not_used -#define tcp6_sock tcp6_sock___not_used -#define bpf_iter__udp bpf_iter__udp___not_used -#define udp6_sock udp6_sock___not_used -#define bpf_iter__unix bpf_iter__unix___not_used -#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used -#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used -#define bpf_iter__sockmap bpf_iter__sockmap___not_used -#define bpf_iter__bpf_link bpf_iter__bpf_link___not_used -#define bpf_iter__cgroup bpf_iter__cgroup___not_used -#define btf_ptr btf_ptr___not_used -#define BTF_F_COMPACT BTF_F_COMPACT___not_used -#define BTF_F_NONAME BTF_F_NONAME___not_used -#define 
BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used -#define BTF_F_ZERO BTF_F_ZERO___not_used -#define bpf_iter__ksym bpf_iter__ksym___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map -#undef bpf_iter__ipv6_route -#undef bpf_iter__netlink -#undef bpf_iter__task -#undef bpf_iter__task_file -#undef bpf_iter__task_vma -#undef bpf_iter__tcp -#undef tcp6_sock -#undef bpf_iter__udp -#undef udp6_sock -#undef bpf_iter__unix -#undef bpf_iter__bpf_map_elem -#undef bpf_iter__bpf_sk_storage_map -#undef bpf_iter__sockmap -#undef bpf_iter__bpf_link -#undef bpf_iter__cgroup -#undef btf_ptr -#undef BTF_F_COMPACT -#undef BTF_F_NONAME -#undef BTF_F_PTR_RAW -#undef BTF_F_ZERO -#undef bpf_iter__ksym - -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__ipv6_route { - struct bpf_iter_meta *meta; - struct fib6_info *rt; -} __attribute__((preserve_access_index)); - -struct bpf_iter__netlink { - struct bpf_iter_meta *meta; - struct netlink_sock *sk; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_file { - struct bpf_iter_meta *meta; - struct task_struct *task; - __u32 fd; - struct file *file; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_vma { - struct bpf_iter_meta *meta; - struct task_struct *task; - struct vm_area_struct *vma; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - -struct bpf_iter__tcp { - struct bpf_iter_meta *meta; - struct sock_common *sk_common; - uid_t uid; -} __attribute__((preserve_access_index)); - -struct tcp6_sock { - struct tcp_sock tcp; - struct ipv6_pinfo inet6; -} __attribute__((preserve_access_index)); - -struct bpf_iter__udp { - struct bpf_iter_meta *meta; - struct udp_sock *udp_sk; - uid_t uid __attribute__((aligned(8))); - int bucket __attribute__((aligned(8))); -} __attribute__((preserve_access_index)); - -struct udp6_sock { - struct udp_sock udp; - struct ipv6_pinfo inet6; -} __attribute__((preserve_access_index)); - -struct bpf_iter__unix { - struct bpf_iter_meta *meta; - struct unix_sock *unix_sk; - uid_t uid; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map_elem { - struct bpf_iter_meta *meta; - struct bpf_map *map; - void *key; - void *value; -}; - -struct bpf_iter__bpf_sk_storage_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; - struct sock *sk; - void *value; -}; - -struct bpf_iter__sockmap { - struct bpf_iter_meta *meta; - struct bpf_map *map; - void *key; - struct sock *sk; -}; - -struct bpf_iter__bpf_link { - struct bpf_iter_meta *meta; - struct bpf_link *link; -}; - -struct bpf_iter__cgroup { - struct bpf_iter_meta *meta; - struct cgroup *cgroup; -} __attribute__((preserve_access_index)); - -struct btf_ptr { - void *ptr; - __u32 type_id; - __u32 flags; -}; - -enum { - BTF_F_COMPACT = (1ULL << 0), - BTF_F_NONAME = (1ULL << 1), - BTF_F_PTR_RAW = (1ULL << 2), - BTF_F_ZERO = (1ULL << 3), -}; - -struct bpf_iter__ksym { - struct bpf_iter_meta *meta; - struct kallsym_iter *ksym; -}; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c index 564835ba7d51..19710cc0f250 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c +++ 
b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c index d7a69217fb68..f47da665f7e0 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c index e1af2f8f75a6..7b69e1887705 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Red Hat, Inc. */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c index 6c39e86b666f..c868ffb8080f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c index 9f0e0705b2bf..9fdea8cd4c6f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c index 5014a17d6c02..aa529f76c7fc 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c index 6cecab2b32ba..e88dab196e0f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Google LLC. 
*/ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c index c7b8e006b171..eb9642923e1c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c index 784a610ce039..73a5cf3ba3d3 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c index 521267818f4d..3e725b1fce37 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022, Oracle and/or its affiliates. */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index a28e51e2dcee..00b2ceae81fb 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c index ec7f91850dec..774d4dbe8189 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c index eafc877ea460..d92631ec6161 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright Amazon.com Inc. or its affiliates. 
*/ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c index f3af0e30cead..317fe49760cc 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Cloudflare */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c index bca8b889cb10..ef2f7c8d9373 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020, Oracle and/or its affiliates. */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c index b0255080662d..959a8d899eaf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c index 442f4ca39fd7..f5a309455490 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c index 423b39e60b6f..d64ba7ddaed5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c index 6cbb3393f243..bc10c4e4b4fa 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c index 92267abb462f..d22449c69363 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c index 943f7bba180e..8b072666f9d9 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include 
"bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c index 2a4647f20c46..6b17e7e86a48 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c index dbf61c44acac..56177508798f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c index e3a7575e81d2..9d8b7310d2c2 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c index 1c7304f56b1e..b150bd468824 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h index d5e3df66ad9a..6a4c50497c5e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c index cf0c485b1ed7..ffbd4b116d17 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c index 5031e21c433f..47ff7754f4fd 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c index e6aefae38894..fea275df9e22 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* 
Copyright Amazon.com Inc. or its affiliates. */ -#include "bpf_iter.h" +#include #include "bpf_tracing_net.h" #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c index ee7455d2623a..174298e122d3 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ -#include "bpf_iter.h" +#include #include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter.c b/tools/testing/selftests/bpf/progs/cgroup_iter.c index de03997322a7..f30841997a8d 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_iter.c +++ b/tools/testing/selftests/bpf/progs/cgroup_iter.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Google */ - -#include "bpf_iter.h" +#include #include #include diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c index 5e282c16eadc..a2de95f85648 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ - -#include "bpf_iter.h" +#include #include #include #include "bpf_misc.h" diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c index 9cceb6521143..fe0f3fa5aab6 100644 --- a/tools/testing/selftests/bpf/progs/exceptions_fail.c +++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c @@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx) } SEC("?tc") -__failure __msg("bpf_rcu_read_unlock is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") int reject_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); @@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("bpf_rcu_read_unlock is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") int reject_subprog_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c new file mode 100644 index 000000000000..b9c8f9457492 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Google */ +#include +#include +#include +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +#define SLAB_NAME_MAX 32 + +struct kmem_cache_result { + char name[SLAB_NAME_MAX]; + long obj_size; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, sizeof(void *)); + __uint(value_size, SLAB_NAME_MAX); + __uint(max_entries, 1); +} slab_hash SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(struct kmem_cache_result)); + __uint(max_entries, 1024); +} slab_result SEC(".maps"); + +extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym; + +/* Result, will be checked by userspace */ +int task_struct_found; +int kmem_cache_seen; +int open_coded_seen; + +SEC("iter/kmem_cache") +int slab_info_collector(struct bpf_iter__kmem_cache *ctx) +{ + struct seq_file *seq = 
ctx->meta->seq; + struct kmem_cache *s = ctx->s; + struct kmem_cache_result *r; + int idx; + + if (s) { + /* To make sure if the slab_iter implements the seq interface + * properly and it's also useful for debugging. + */ + BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size); + + idx = kmem_cache_seen; + r = bpf_map_lookup_elem(&slab_result, &idx); + if (r == NULL) + return 0; + + kmem_cache_seen++; + + /* Save name and size to match /proc/slabinfo */ + bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name); + r->obj_size = s->size; + + if (!bpf_strncmp(r->name, 11, "task_struct")) + bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST); + } + + return 0; +} + +SEC("raw_tp/bpf_test_finish") +int BPF_PROG(check_task_struct) +{ + u64 curr = bpf_get_current_task(); + struct kmem_cache *s; + char *name; + + s = bpf_get_kmem_cache(curr); + if (s == NULL) { + task_struct_found = -1; + return 0; + } + name = bpf_map_lookup_elem(&slab_hash, &s); + if (name && !bpf_strncmp(name, 11, "task_struct")) + task_struct_found = 1; + else + task_struct_found = -2; + return 0; +} + +SEC("syscall") +int open_coded_iter(const void *ctx) +{ + struct kmem_cache *s; + + bpf_for_each(kmem_cache, s) { + struct kmem_cache_result *r; + + r = bpf_map_lookup_elem(&slab_result, &open_coded_seen); + if (!r) + break; + + if (r->obj_size != s->size) + break; + + open_coded_seen++; + } + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c new file mode 100644 index 000000000000..288577e81deb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + + +SEC("kprobe.session") +__success +int kprobe_session_return_0(struct pt_regs *ctx) +{ + return 0; +} + +SEC("kprobe.session") +__success +int kprobe_session_return_1(struct pt_regs *ctx) +{ + return 1; +} + +SEC("kprobe.session") +__failure +__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]") +int kprobe_session_return_2(struct pt_regs *ctx) +{ + return 2; +} diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c index cc79dddac182..049a1f78de3f 100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs1.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c @@ -63,6 +63,8 @@ extern int set_output_val2(int x); /* here we'll force set_output_ctx2() to be __hidden in the final obj file */ __hidden extern void set_output_ctx2(__u64 *ctx); +void *bpf_cast_to_kern_ctx(void *obj) __ksym; + SEC("?raw_tp/sys_enter") int BPF_PROG(handler1, struct pt_regs *regs, long id) { @@ -86,4 +88,10 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id) return 0; } +/* Generate BTF FUNC record and test linking with duplicate extern functions */ +void kfunc_gen1(void) +{ + bpf_cast_to_kern_ctx(0); +} + char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c index 942cc5526ddf..96850759fd8d 100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs2.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c @@ -63,6 +63,8 @@ extern int set_output_val1(int x); /* here we'll force set_output_ctx1() to be __hidden in the final obj file */ __hidden extern void set_output_ctx1(__u64 *ctx); +void *bpf_cast_to_kern_ctx(void 
*obj) __ksym; + SEC("?raw_tp/sys_enter") int BPF_PROG(handler2, struct pt_regs *regs, long id) { @@ -86,4 +88,10 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id) return 0; } +/* Generate BTF FUNC record and test linking with duplicate extern functions */ +void kfunc_gen2(void) +{ + bpf_cast_to_kern_ctx(0); +} + char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c index 672fc368d9c4..885377e83607 100644 --- a/tools/testing/selftests/bpf/progs/preempt_lock.c +++ b/tools/testing/selftests/bpf/progs/preempt_lock.c @@ -6,7 +6,7 @@ #include "bpf_experimental.h" SEC("?tc") -__failure __msg("1 bpf_preempt_enable is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -14,7 +14,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("2 bpf_preempt_enable(s) are missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -23,7 +23,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("3 bpf_preempt_enable(s) are missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_3(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -33,7 +33,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("1 bpf_preempt_enable is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -55,7 +55,7 @@ static __noinline void preempt_enable(void) } SEC("?tc") -__failure __msg("1 bpf_preempt_enable is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -63,7 +63,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("2 bpf_preempt_enable(s) are missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -72,7 +72,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("1 bpf_preempt_enable is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx) { preempt_disable(); diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c new file mode 100644 index 000000000000..457f34c151e3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include + +char _license[] SEC("license") = "GPL"; + +int tid; +int i; + +SEC("tp_btf/bpf_testmod_test_raw_tp_null") +int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + if (task->pid != tid) + return 0; + + i = i + skb->mark + 1; + /* The compiler may move the NULL check before this deref, which causes + * the load to fail as deref of scalar. 
Prevent that by using a barrier. + */ + barrier(); + /* If dead code elimination kicks in, the increment below will + * be removed. For raw_tp programs, we mark input arguments as + * PTR_MAYBE_NULL, so branch prediction should never kick in. + */ + if (!skb) + i += 2; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c index 56b787a89876..d7fdcabe7d90 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c @@ -6,5 +6,17 @@ char _license[] SEC("license") = "GPL"; +/* + * This subprogram validates that libbpf handles the situation in which BPF + * object has subprograms in .text section, but has no entry BPF programs. + * At some point that was causing issues due to legacy logic of treating such + * subprogram as entry program (with unknown program type, which would fail). + */ +int dangling_subprog(void) +{ + /* do nothing, just be here */ + return 0; +} + SEC(".struct_ops.link") struct bpf_testmod_ops testmod_do_detach; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c new file mode 100644 index 000000000000..8ea57e5348ab --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +#if defined(__TARGET_ARCH_x86) +bool skip __attribute((__section__(".data"))) = false; +#else +bool skip = true; +#endif + +void bpf_testmod_ops3_call_test_2(void) __ksym; + +int val_i, val_j; + +__noinline static int subprog2(int *a, int *b) +{ + return val_i + a[10] + b[20]; +} + +__noinline static int subprog1(int *a) +{ + /* stack size 200 bytes */ + int b[50] = {}; + + b[20] = 2; + return subprog2(a, b); +} + + +SEC("struct_ops") +int BPF_PROG(test_1) +{ + /* stack size 400 bytes */ + int a[100] = {}; + + a[10] = 1; + val_i = subprog1(a); + bpf_testmod_ops3_call_test_2(); + return 0; +} + +SEC("struct_ops") +int BPF_PROG(test_2) +{ + /* stack size 200 bytes */ + int a[50] = {}; + + a[10] = 3; + val_j = subprog1(a); + return 0; +} + +SEC(".struct_ops") +struct bpf_testmod_ops3 testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c new file mode 100644 index 000000000000..1f55ec4cee37 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +#if defined(__TARGET_ARCH_x86) +bool skip __attribute((__section__(".data"))) = false; +#else +bool skip = true; +#endif + +void bpf_testmod_ops3_call_test_2(void) __ksym; + +int val_i, val_j; + +__noinline static int subprog2(int *a, int *b) +{ + return val_i + a[10] + b[20]; +} + +__noinline static int subprog1(int *a) +{ + /* stack size 200 bytes */ + int b[50] = {}; + + b[20] = 2; + return subprog2(a, b); +} + + +SEC("struct_ops") +int BPF_PROG(test_1) +{ + /* stack size 100 bytes */ + int a[25] = {}; + + a[10] = 1; + val_i = subprog1(a); + bpf_testmod_ops3_call_test_2(); + return 0; +} + +SEC("struct_ops") +int BPF_PROG(test_2) +{ + /* stack size 400 bytes */ + int a[100] = {}; 
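/*
 * A minimal sketch of the NULL-check ordering trick used in raw_tp_null.c
 * above, assuming barrier() is the empty-asm compiler barrier provided by
 * bpf_helpers.h (asm volatile("" ::: "memory")); program and variable names
 * here are illustrative only. The argument is dereferenced first, the barrier
 * keeps the compiler from reordering, and only then is the explicit NULL
 * check (a branch the compiler would otherwise consider dead) emitted.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

int hits;

SEC("tp_btf/bpf_testmod_test_raw_tp_null")
int BPF_PROG(raw_tp_null_sketch, struct sk_buff *skb)
{
	hits += skb->mark + 1;	/* deref first; raw_tp args can be accessed directly */
	barrier();		/* compiler barrier: keep the NULL check after the load */
	if (!skb)		/* exercises the verifier's PTR_MAYBE_NULL handling */
		hits += 2;
	return 0;
}

char _license[] SEC("license") = "GPL";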
+ + a[10] = 3; + val_j = subprog1(a); + return 0; +} + +SEC(".struct_ops") +struct bpf_testmod_ops3 testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c new file mode 100644 index 000000000000..f2f300d50988 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +#if defined(__TARGET_ARCH_x86) +bool skip __attribute((__section__(".data"))) = false; +#else +bool skip = true; +#endif + +void bpf_testmod_ops3_call_test_1(void) __ksym; + +int val_i, val_j; + +__noinline static int subprog2(int *a, int *b) +{ + return val_i + a[1] + b[20]; +} + +__noinline static int subprog1(int *a) +{ + /* stack size 400 bytes */ + int b[100] = {}; + + b[20] = 2; + return subprog2(a, b); +} + + +SEC("struct_ops") +int BPF_PROG(test_1) +{ + /* stack size 20 bytes */ + int a[5] = {}; + + a[1] = 1; + val_j += subprog1(a); + bpf_testmod_ops3_call_test_1(); + return 0; +} + +SEC(".struct_ops") +struct bpf_testmod_ops3 testmod_1 = { + .test_1 = (void *)test_1, +}; diff --git a/tools/testing/selftests/bpf/progs/tailcall_fail.c b/tools/testing/selftests/bpf/progs/tailcall_fail.c new file mode 100644 index 000000000000..bc77921d2bb0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_fail.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include +#include + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +extern void bpf_rcu_read_lock(void) __ksym; +extern void bpf_rcu_read_unlock(void) __ksym; + +#define private(name) SEC(".bss." 
#name) __hidden __attribute__((aligned(8))) + +private(A) struct bpf_spin_lock lock; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 3); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +SEC("?tc") +__failure __msg("function calls are not allowed while holding a lock") +int reject_tail_call_spin_lock(struct __sk_buff *ctx) +{ + bpf_spin_lock(&lock); + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +SEC("?tc") +__failure __msg("tail_call cannot be used inside bpf_rcu_read_lock-ed region") +int reject_tail_call_rcu_lock(struct __sk_buff *ctx) +{ + bpf_rcu_read_lock(); + bpf_tail_call_static(ctx, &jmp_table, 0); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?tc") +__failure __msg("tail_call cannot be used inside bpf_preempt_disable-ed region") +int reject_tail_call_preempt_lock(struct __sk_buff *ctx) +{ + bpf_guard_preempt(); + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +SEC("?tc") +__failure __msg("tail_call would lead to reference leak") +int reject_tail_call_ref(struct __sk_buff *ctx) +{ + struct foo { int i; } *p; + + p = bpf_obj_new(typeof(*p)); + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h index 6720c4b5be41..e9c4fea7a4bb 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -23,6 +23,7 @@ struct { struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; void bpf_task_release(struct task_struct *p) __ksym; struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +struct task_struct *bpf_task_from_vpid(s32 vpid) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index ad88a3796ddf..4c07ea193f72 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -247,6 +247,20 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl return 0; } +SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + acquired = bpf_task_from_vpid(task->pid); + + /* Releasing bpf_task_from_vpid() lookup without a NULL check. */ + bpf_task_release(acquired); + + return 0; +} + SEC("lsm/task_free") __failure __msg("R1 must be a rcu pointer") int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index a55149015063..5fb4fc19d26a 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -366,3 +366,54 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl return 0; } + +SEC("syscall") +int test_task_from_vpid_current(const void *ctx) +{ + struct task_struct *current, *v_task; + + v_task = bpf_task_from_vpid(1); + if (!v_task) { + err = 1; + return 0; + } + + current = bpf_get_current_task_btf(); + + /* The current process should be the init process (pid 1) in the new pid namespace. 
*/ + if (current != v_task) + err = 2; + + bpf_task_release(v_task); + return 0; +} + +SEC("syscall") +int test_task_from_vpid_invalid(const void *ctx) +{ + struct task_struct *v_task; + + v_task = bpf_task_from_vpid(-1); + if (v_task) { + err = 1; + goto err; + } + + /* There should be only one process (current process) in the new pid namespace. */ + v_task = bpf_task_from_vpid(2); + if (v_task) { + err = 2; + goto err; + } + + v_task = bpf_task_from_vpid(9999); + if (v_task) { + err = 3; + goto err; + } + + return 0; +err: + bpf_task_release(v_task); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_ls_uptr.c b/tools/testing/selftests/bpf/progs/task_ls_uptr.c new file mode 100644 index 000000000000..ddbe11b46eef --- /dev/null +++ b/tools/testing/selftests/bpf/progs/task_ls_uptr.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "uptr_test_common.h" + +struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; +void bpf_cgroup_release(struct cgroup *cgrp) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct value_type); +} datamap SEC(".maps"); + +pid_t target_pid = 0; +pid_t parent_pid = 0; + +SEC("tp_btf/sys_enter") +int on_enter(__u64 *ctx) +{ + struct task_struct *task, *data_task; + struct value_type *ptr; + struct user_data *udata; + struct cgroup *cgrp; + + task = bpf_get_current_task_btf(); + if (task->pid != target_pid) + return 0; + + data_task = bpf_task_from_pid(parent_pid); + if (!data_task) + return 0; + + ptr = bpf_task_storage_get(&datamap, data_task, 0, 0); + bpf_task_release(data_task); + if (!ptr) + return 0; + + cgrp = bpf_kptr_xchg(&ptr->cgrp, NULL); + if (cgrp) { + int lvl = cgrp->level; + + bpf_cgroup_release(cgrp); + return lvl; + } + + udata = ptr->udata; + if (!udata || udata->result) + return 0; + udata->result = MAGIC_VALUE + udata->a + udata->b; + + udata = ptr->nested.udata; + if (udata && !udata->nested_result) + udata->nested_result = udata->result; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c index 8a0632c37839..d1a57f7d09bd 100644 --- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c @@ -5,10 +5,11 @@ #include "bpf_misc.h" __noinline -int subprog(struct __sk_buff *skb) +int subprog_tc(struct __sk_buff *skb) { int ret = 1; + __sink(skb); __sink(ret); return ret; } @@ -16,7 +17,7 @@ int subprog(struct __sk_buff *skb) SEC("tc") int entry_tc(struct __sk_buff *skb) { - return subprog(skb); + return subprog_tc(skb); } char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c index 92354cd72044..176a355e3062 100644 --- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c +++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c @@ -1,27 +1,50 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook -#include +#include #include #include -__u32 sig = 0, pid = 0, status = 0, signal_thread = 0; +struct task_struct *bpf_task_from_pid(int pid) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; +int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, u64 value) __ksym; 
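/*
 * Userspace-side sketch for the bpf_send_signal_task() kfunc used above. The
 * kfunc's last argument is a cookie (the test program passes 8); the
 * assumption here, not spelled out in this hunk, is that the cookie is
 * delivered to the receiving task through siginfo's si_value, so an
 * SA_SIGINFO handler can read it back. Names below are illustrative.
 */
#define _GNU_SOURCE
#include <signal.h>

static volatile sig_atomic_t cookie_seen;

static void on_signal(int sig, siginfo_t *info, void *ucontext)
{
	/* cookie passed as the u64 "value" argument of bpf_send_signal_task() */
	cookie_seen = info->si_value.sival_int;
}

static int install_handler(int sig)
{
	struct sigaction sa = {
		.sa_sigaction = on_signal,
		.sa_flags = SA_SIGINFO,
	};

	return sigaction(sig, &sa, NULL);
}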
+ +__u32 sig = 0, pid = 0, status = 0, signal_thread = 0, target_pid = 0; static __always_inline int bpf_send_signal_test(void *ctx) { + struct task_struct *target_task = NULL; int ret; + u64 value; if (status != 0 || pid == 0) return 0; if ((bpf_get_current_pid_tgid() >> 32) == pid) { - if (signal_thread) - ret = bpf_send_signal_thread(sig); - else - ret = bpf_send_signal(sig); + if (target_pid) { + target_task = bpf_task_from_pid(target_pid); + if (!target_task) + return 0; + value = 8; + } + + if (signal_thread) { + if (target_pid) + ret = bpf_send_signal_task(target_task, sig, PIDTYPE_PID, value); + else + ret = bpf_send_signal_thread(sig); + } else { + if (target_pid) + ret = bpf_send_signal_task(target_task, sig, PIDTYPE_TGID, value); + else + ret = bpf_send_signal(sig); + } if (ret == 0) status = 1; } + if (target_task) + bpf_task_release(target_task); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c index 43f40c4fe241..1c8b678e2e9a 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c @@ -28,8 +28,8 @@ struct { }, }; -SEC(".data.A") struct bpf_spin_lock lockA; -SEC(".data.B") struct bpf_spin_lock lockB; +static struct bpf_spin_lock lockA SEC(".data.A"); +static struct bpf_spin_lock lockB SEC(".data.B"); SEC("?tc") int lock_id_kptr_preserve(void *ctx) diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c index bba3e37f749b..5aaf2b065f86 100644 --- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -7,7 +7,11 @@ #include "bpf_misc.h" SEC("tp_btf/bpf_testmod_test_nullable_bare") -__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") +/* This used to be a failure test, but raw_tp nullable arguments can now + * directly be dereferenced, whether they have nullable annotation or not, + * and don't need to be explicitly checked. + */ +__success int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) { return nullable_ctx->len; diff --git a/tools/testing/selftests/bpf/progs/update_map_in_htab.c b/tools/testing/selftests/bpf/progs/update_map_in_htab.c new file mode 100644 index 000000000000..c2066247cd9c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/update_map_in_htab.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024. 
Huawei Technologies Co., Ltd */ +#include +#include + +struct inner_map_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __type(key, int); + __type(value, int); + __uint(max_entries, 2); + __array(values, struct inner_map_type); +} outer_htab_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); + __uint(max_entries, 2); + __array(values, struct inner_map_type); +} outer_alloc_htab_map SEC(".maps"); + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c index 7e0fdcbbd242..93752bb5690b 100644 --- a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c @@ -24,16 +24,16 @@ int uprobe_1(struct pt_regs *ctx) return 0; } -SEC("uprobe.multi") +SEC("uprobe.session") int uprobe_2(struct pt_regs *ctx) { uprobe_result[2]++; return 0; } -SEC("uprobe.multi") +SEC("uprobe.session") int uprobe_3(struct pt_regs *ctx) { uprobe_result[3]++; - return 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c new file mode 100644 index 000000000000..30bff90b68dc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_multi_func_1_addr = 0; +__u64 uprobe_multi_func_2_addr = 0; +__u64 uprobe_multi_func_3_addr = 0; + +__u64 uprobe_session_result[3] = {}; +__u64 uprobe_multi_sleep_result = 0; + +void *user_ptr = 0; +int pid = 0; + +static int uprobe_multi_check(void *ctx, bool is_return) +{ + const __u64 funcs[] = { + uprobe_multi_func_1_addr, + uprobe_multi_func_2_addr, + uprobe_multi_func_3_addr, + }; + unsigned int i; + __u64 addr; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 1; + + addr = bpf_get_func_ip(ctx); + + for (i = 0; i < ARRAY_SIZE(funcs); i++) { + if (funcs[i] == addr) { + uprobe_session_result[i]++; + break; + } + } + + /* only uprobe_multi_func_2 executes return probe */ + if ((addr == uprobe_multi_func_1_addr) || + (addr == uprobe_multi_func_3_addr)) + return 1; + + return 0; +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_*") +int uprobe(struct pt_regs *ctx) +{ + return uprobe_multi_check(ctx, bpf_session_is_return()); +} + +static __always_inline bool verify_sleepable_user_copy(void) +{ + char data[9]; + + bpf_copy_from_user(data, sizeof(data), user_ptr); + return bpf_strncmp(data, sizeof(data), "test_data") == 0; +} + +SEC("uprobe.session.s//proc/self/exe:uprobe_multi_func_*") +int uprobe_sleepable(struct pt_regs *ctx) +{ + if (verify_sleepable_user_copy()) + uprobe_multi_sleep_result++; + return uprobe_multi_check(ctx, bpf_session_is_return()); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c new file mode 100644 index 000000000000..5befdf944dc6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include 
"bpf_kfuncs.h" + +char _license[] SEC("license") = "GPL"; + +int pid = 0; + +__u64 test_uprobe_1_result = 0; +__u64 test_uprobe_2_result = 0; +__u64 test_uprobe_3_result = 0; + +static int check_cookie(__u64 val, __u64 *result) +{ + __u64 *cookie; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 1; + + cookie = bpf_session_cookie(); + + if (bpf_session_is_return()) + *result = *cookie == val ? val : 0; + else + *cookie = val; + return 0; +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1") +int uprobe_1(struct pt_regs *ctx) +{ + return check_cookie(1, &test_uprobe_1_result); +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_2") +int uprobe_2(struct pt_regs *ctx) +{ + return check_cookie(2, &test_uprobe_2_result); +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_3") +int uprobe_3(struct pt_regs *ctx) +{ + return check_cookie(3, &test_uprobe_3_result); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c new file mode 100644 index 000000000000..8fbcd69fae22 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +int pid = 0; + +int idx_entry = 0; +int idx_return = 0; + +__u64 test_uprobe_cookie_entry[6]; +__u64 test_uprobe_cookie_return[3]; + +static int check_cookie(void) +{ + __u64 *cookie = bpf_session_cookie(); + + if (bpf_session_is_return()) { + if (idx_return >= ARRAY_SIZE(test_uprobe_cookie_return)) + return 1; + test_uprobe_cookie_return[idx_return++] = *cookie; + return 0; + } + + if (idx_entry >= ARRAY_SIZE(test_uprobe_cookie_entry)) + return 1; + *cookie = test_uprobe_cookie_entry[idx_entry]; + return idx_entry++ % 2; +} + + +SEC("uprobe.session//proc/self/exe:uprobe_session_recursive") +int uprobe_recursive(struct pt_regs *ctx) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 1; + + return check_cookie(); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c new file mode 100644 index 000000000000..7c960376ae97 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "bpf_kfuncs.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_session_result[3] = {}; +int pid = 0; + +static int uprobe_multi_check(void *ctx, int idx) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 1; + + uprobe_session_result[idx]++; + + /* only consumer 1 executes return probe */ + if (idx == 0 || idx == 2) + return 1; + + return 0; +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1") +int uprobe_0(struct pt_regs *ctx) +{ + return uprobe_multi_check(ctx, 0); +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1") +int uprobe_1(struct pt_regs *ctx) +{ + return uprobe_multi_check(ctx, 1); +} + +SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1") +int uprobe_2(struct pt_regs *ctx) +{ + return uprobe_multi_check(ctx, 2); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c new file mode 100644 index 000000000000..fe49f2cb5360 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + + +SEC("uprobe.session") +__success +int uprobe_sesison_return_0(struct pt_regs *ctx) +{ + return 0; +} + +SEC("uprobe.session") +__success +int uprobe_sesison_return_1(struct pt_regs *ctx) +{ + return 1; +} + +SEC("uprobe.session") +__failure +__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]") +int uprobe_sesison_return_2(struct pt_regs *ctx) +{ + return 2; +} diff --git a/tools/testing/selftests/bpf/progs/uptr_failure.c b/tools/testing/selftests/bpf/progs/uptr_failure.c new file mode 100644 index 000000000000..0cfa1fd61440 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uptr_failure.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "bpf_experimental.h" +#include "bpf_misc.h" +#include "uptr_test_common.h" + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct value_type); +} datamap SEC(".maps"); + +SEC("?syscall") +__failure __msg("store to uptr disallowed") +int uptr_write(const void *ctx) +{ + struct task_struct *task; + struct value_type *v; + + task = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&datamap, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 0; + + v->udata = NULL; + return 0; +} + +SEC("?syscall") +__failure __msg("store to uptr disallowed") +int uptr_write_nested(const void *ctx) +{ + struct task_struct *task; + struct value_type *v; + + task = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&datamap, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 0; + + v->nested.udata = NULL; + return 0; +} + +SEC("?syscall") +__failure __msg("R1 invalid mem access 'mem_or_null'") +int uptr_no_null_check(const void *ctx) +{ + struct task_struct *task; + struct value_type *v; + + task = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&datamap, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 0; + + v->udata->result = 0; + + return 0; +} + +SEC("?syscall") +__failure __msg("doesn't point to kptr") +int uptr_kptr_xchg(const void *ctx) +{ + struct task_struct *task; + struct value_type *v; + + task = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&datamap, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 0; + + bpf_kptr_xchg(&v->udata, NULL); + + return 0; +} + +SEC("?syscall") +__failure __msg("invalid mem access 'scalar'") +int uptr_obj_new(const void *ctx) +{ + struct value_type *v; + + v = bpf_obj_new(typeof(*v)); + if (!v) + return 0; + + if (v->udata) + v->udata->result = 0; + + bpf_obj_drop(v); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/uptr_map_failure.c b/tools/testing/selftests/bpf/progs/uptr_map_failure.c new file mode 100644 index 000000000000..417b763d76b4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uptr_map_failure.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include "uptr_test_common.h" + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct large_uptr); +} large_uptr_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct empty_uptr); +} empty_uptr_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct kstruct_uptr); +} kstruct_uptr_map SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/uptr_update_failure.c b/tools/testing/selftests/bpf/progs/uptr_update_failure.c new file mode 100644 index 000000000000..86c3bb954abc --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uptr_update_failure.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "uptr_test_common.h" + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct value_lock_type); +} datamap SEC(".maps"); + +/* load test only. not used */ +SEC("syscall") +int not_used(void *ctx) +{ + struct value_lock_type *ptr; + struct task_struct *task; + struct user_data *udata; + + task = bpf_get_current_task_btf(); + ptr = bpf_task_storage_get(&datamap, task, 0, 0); + if (!ptr) + return 0; + + bpf_spin_lock(&ptr->lock); + + udata = ptr->udata; + if (!udata) { + bpf_spin_unlock(&ptr->lock); + return 0; + } + udata->result = MAGIC_VALUE + udata->a + udata->b; + + bpf_spin_unlock(&ptr->lock); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index 6065f862d964..f94f30cf1bb8 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -29,12 +29,12 @@ int big_alloc1(void *ctx) if (!page1) return 1; *page1 = 1; - page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE, + page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2, 1, NUMA_NO_NODE, 0); if (!page2) return 2; *page2 = 2; - no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE, + no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE, 1, NUMA_NO_NODE, 0); if (no_page) return 3; @@ -66,4 +66,110 @@ int big_alloc1(void *ctx) #endif return 0; } + +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) +#define PAGE_CNT 100 +__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */ +__u8 __arena *base; + +/* + * Check that arena's range_tree algorithm allocates pages sequentially + * on the first pass and then fills in all gaps on the second pass. 
+ */ +__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass, + int max_idx, int step) +{ + __u8 __arena *pg; + int i, pg_idx; + + for (i = 0; i < page_cnt; i++) { + pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce, + NUMA_NO_NODE, 0); + if (!pg) + return step; + pg_idx = (unsigned long) (pg - base) / PAGE_SIZE; + if (first_pass) { + /* Pages must be allocated sequentially */ + if (pg_idx != i) + return step + 100; + } else { + /* Allocator must fill into gaps */ + if (pg_idx >= max_idx || (pg_idx & 1)) + return step + 200; + } + *pg = pg_idx; + page[pg_idx] = pg; + cond_break; + } + return 0; +} + +SEC("syscall") +__success __retval(0) +int big_alloc2(void *ctx) +{ + __u8 __arena *pg; + int i, err; + + base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!base) + return 1; + bpf_arena_free_pages(&arena, (void __arena *)base, 1); + + err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2); + if (err) + return err; + + /* Clear all even pages */ + for (i = 0; i < PAGE_CNT; i += 2) { + pg = page[i]; + if (*pg != i) + return 3; + bpf_arena_free_pages(&arena, (void __arena *)pg, 1); + page[i] = NULL; + cond_break; + } + + /* Allocate into freed gaps */ + err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4); + if (err) + return err; + + /* Free pairs of pages */ + for (i = 0; i < PAGE_CNT; i += 4) { + pg = page[i]; + if (*pg != i) + return 5; + bpf_arena_free_pages(&arena, (void __arena *)pg, 2); + page[i] = NULL; + page[i + 1] = NULL; + cond_break; + } + + /* Allocate 2 pages at a time into freed gaps */ + err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6); + if (err) + return err; + + /* Check pages without freeing */ + for (i = 0; i < PAGE_CNT; i += 2) { + pg = page[i]; + if (*pg != i) + return 7; + cond_break; + } + + pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + + if (!pg) + return 8; + /* + * The first PAGE_CNT pages are occupied. The new page + * must be above. 
+ */ + if ((pg - base) / PAGE_SIZE < PAGE_CNT) + return 9; + return 0; +} +#endif char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c new file mode 100644 index 000000000000..b1fbdf119553 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +/* From include/linux/filter.h */ +#define MAX_BPF_STACK 512 + +#if defined(__TARGET_ARCH_x86) + +struct elem { + struct bpf_timer t; + char pad[256]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} array SEC(".maps"); + +SEC("kprobe") +__description("Private stack, single prog") +__success +__arch_x86_64 +__jited(" movabsq $0x{{.*}}, %r9") +__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" movl $0x2a, %edi") +__jited(" movq %rdi, -0x100(%r9)") +__naked void private_stack_single_prog(void) +{ + asm volatile (" \ + r1 = 42; \ + *(u64 *)(r10 - 256) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("raw_tp") +__description("No private stack") +__success +__arch_x86_64 +__jited(" subq $0x8, %rsp") +__naked void no_private_stack_nested(void) +{ + asm volatile (" \ + r1 = 42; \ + *(u64 *)(r10 - 8) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +__used +__naked static void cumulative_stack_depth_subprog(void) +{ + asm volatile (" \ + r1 = 41; \ + *(u64 *)(r10 - 32) = r1; \ + call %[bpf_get_smp_processor_id]; \ + exit; \ +" : + : __imm(bpf_get_smp_processor_id) + : __clobber_all); +} + +SEC("kprobe") +__description("Private stack, subtree > MAX_BPF_STACK") +__success +__arch_x86_64 +/* private stack fp for the main prog */ +__jited(" movabsq $0x{{.*}}, %r9") +__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" movl $0x2a, %edi") +__jited(" movq %rdi, -0x200(%r9)") +__jited(" pushq %r9") +__jited(" callq 0x{{.*}}") +__jited(" popq %r9") +__jited(" xorl %eax, %eax") +__naked void private_stack_nested_1(void) +{ + asm volatile (" \ + r1 = 42; \ + *(u64 *)(r10 - %[max_bpf_stack]) = r1; \ + call cumulative_stack_depth_subprog; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(max_bpf_stack, MAX_BPF_STACK) + : __clobber_all); +} + +__naked __noinline __used +static unsigned long loop_callback(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 42; \ + *(u64 *)(r10 - 512) = r1; \ + call cumulative_stack_depth_subprog; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_common); +} + +SEC("raw_tp") +__description("Private stack, callback") +__success +__arch_x86_64 +/* for func loop_callback */ +__jited("func #1") +__jited(" endbr64") +__jited(" nopl (%rax,%rax)") +__jited(" nopl (%rax)") +__jited(" pushq %rbp") +__jited(" movq %rsp, %rbp") +__jited(" endbr64") +__jited(" movabsq $0x{{.*}}, %r9") +__jited(" addq %gs:0x{{.*}}, %r9") +__jited(" pushq %r9") +__jited(" callq") +__jited(" popq %r9") +__jited(" movl $0x2a, %edi") +__jited(" movq %rdi, -0x200(%r9)") +__jited(" pushq %r9") +__jited(" callq") +__jited(" popq %r9") +__naked void private_stack_callback(void) +{ + asm volatile (" \ + r1 = 1; \ + r2 = %[loop_callback]; \ + r3 = 0; \ + r4 = 0; \ + call %[bpf_loop]; \ + r0 = 0; \ + exit; \ +" : + : __imm_ptr(loop_callback), + __imm(bpf_loop) + : __clobber_common); +} + +SEC("fentry/bpf_fentry_test9") +__description("Private stack, exception in main prog") 
+__success __retval(0) +__arch_x86_64 +__jited(" pushq %r9") +__jited(" callq") +__jited(" popq %r9") +int private_stack_exception_main_prog(void) +{ + asm volatile (" \ + r1 = 42; \ + *(u64 *)(r10 - 512) = r1; \ +" ::: __clobber_common); + + bpf_throw(0); + return 0; +} + +__used static int subprog_exception(void) +{ + bpf_throw(0); + return 0; +} + +SEC("fentry/bpf_fentry_test9") +__description("Private stack, exception in subprog") +__success __retval(0) +__arch_x86_64 +__jited(" movq %rdi, -0x200(%r9)") +__jited(" pushq %r9") +__jited(" callq") +__jited(" popq %r9") +int private_stack_exception_sub_prog(void) +{ + asm volatile (" \ + r1 = 42; \ + *(u64 *)(r10 - 512) = r1; \ + call subprog_exception; \ +" ::: __clobber_common); + + return 0; +} + +int glob; +__noinline static void subprog2(int *val) +{ + glob += val[0] * 2; +} + +__noinline static void subprog1(int *val) +{ + int tmp[64] = {}; + + tmp[0] = *val; + subprog2(tmp); +} + +__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer) +{ + subprog1(key); + return 0; +} + +__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer) +{ + return 0; +} + +SEC("fentry/bpf_fentry_test9") +__description("Private stack, async callback, not nested") +__success __retval(0) +__arch_x86_64 +__jited(" movabsq $0x{{.*}}, %r9") +int private_stack_async_callback_1(void) +{ + struct bpf_timer *arr_timer; + int array_key = 0; + + arr_timer = bpf_map_lookup_elem(&array, &array_key); + if (!arr_timer) + return 0; + + bpf_timer_init(arr_timer, &array, 1); + bpf_timer_set_callback(arr_timer, timer_cb2); + bpf_timer_start(arr_timer, 0, 0); + subprog1(&array_key); + return 0; +} + +SEC("fentry/bpf_fentry_test9") +__description("Private stack, async callback, potential nesting") +__success __retval(0) +__arch_x86_64 +__jited(" subq $0x100, %rsp") +int private_stack_async_callback_2(void) +{ + struct bpf_timer *arr_timer; + int array_key = 0; + + arr_timer = bpf_map_lookup_elem(&array, &array_key); + if (!arr_timer) + return 0; + + bpf_timer_init(arr_timer, &array, 1); + bpf_timer_set_callback(arr_timer, timer_cb1); + bpf_timer_start(arr_timer, 0, 0); + subprog1(&array_key); + return 0; +} + +#else + +SEC("kprobe") +__description("private stack is not supported, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c index c4c6da21265e..683a882b3e6d 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -791,7 +791,7 @@ l0_%=: r0 = *(u8*)skb[0]; \ SEC("tc") __description("reference tracking: forbid LD_ABS while holding reference") -__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak") __naked void ld_abs_while_holding_reference(void) { asm volatile (" \ @@ -836,7 +836,7 @@ l0_%=: r7 = 1; \ SEC("tc") __description("reference tracking: forbid LD_IND while holding reference") -__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak") __naked void ld_ind_while_holding_reference(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c index ee76b51005ab..d3e70e38e442 100644 --- 
a/tools/testing/selftests/bpf/progs/verifier_sock.c +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -977,4 +977,64 @@ l1_%=: r0 = *(u8*)(r7 + 0); \ : __clobber_all); } +SEC("cgroup/post_bind4") +__description("sk->src_ip6[0] [load 1st byte]") +__failure __msg("invalid bpf_context access off=28 size=2") +__naked void post_bind4_read_src_ip6(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]); \ + r0 = 1; \ + exit; \ +" : + : __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0])) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("sk->mark [load mark]") +__failure __msg("invalid bpf_context access off=16 size=2") +__naked void post_bind4_read_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = *(u16*)(r6 + %[bpf_sock_mark]); \ + r0 = 1; \ + exit; \ +" : + : __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)) + : __clobber_all); +} + +SEC("cgroup/post_bind6") +__description("sk->src_ip4 [load src_ip4]") +__failure __msg("invalid bpf_context access off=24 size=2") +__naked void post_bind6_read_src_ip4(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]); \ + r0 = 1; \ + exit; \ +" : + : __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4)) + : __clobber_all); +} + +SEC("cgroup/sock_create") +__description("sk->src_port [word load]") +__failure __msg("invalid bpf_context access off=44 size=2") +__naked void sock_create_read_src_port(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = *(u16*)(r6 + %[bpf_sock_src_port]); \ + r0 = 1; \ + exit; \ +" : + : __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c index fb316c080c84..3f679de73229 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -187,7 +187,7 @@ l0_%=: r6 = r0; \ SEC("cgroup/skb") __description("spin_lock: test6 missing unlock") -__failure __msg("unlock is missing") +__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region") __failure_unpriv __msg_unpriv("") __naked void spin_lock_test6_missing_unlock(void) { diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 905d5981ace1..8b40e9496af1 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -26,10 +26,6 @@ #include "test_maps.h" #include "testing_helpers.h" -#ifndef ENOTSUPP -#define ENOTSUPP 524 -#endif - int skips; static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) }; diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index c7a70e1a1085..6088d8222d59 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -16,15 +16,18 @@ #include #include #include +#include #include "json_writer.h" #include "network_helpers.h" +/* backtrace() and backtrace_symbols_fd() are glibc specific, + * use header file when glibc is available and provide stub + * implementations when another libc implementation is used. 
+ */ #ifdef __GLIBC__ #include /* backtrace */ -#endif - -/* Default backtrace funcs if missing at link */ +#else __weak int backtrace(void **buffer, int size) { return 0; @@ -34,6 +37,7 @@ __weak void backtrace_symbols_fd(void *const *buffer, int size, int fd) { dprintf(fd, "\n"); } +#endif /*__GLIBC__ */ int env_verbosity = 0; @@ -176,6 +180,88 @@ int usleep(useconds_t usec) return syscall(__NR_nanosleep, &ts, NULL); } +/* Watchdog timer is started by watchdog_start() and stopped by watchdog_stop(). + * If timer is active for longer than env.secs_till_notify, + * it prints the name of the current test to the stderr. + * If timer is active for longer than env.secs_till_kill, + * it kills the thread executing the test by sending a SIGSEGV signal to it. + */ +static void watchdog_timer_func(union sigval sigval) +{ + struct itimerspec timeout = {}; + char test_name[256]; + int err; + + if (env.subtest_state) + snprintf(test_name, sizeof(test_name), "%s/%s", + env.test->test_name, env.subtest_state->name); + else + snprintf(test_name, sizeof(test_name), "%s", + env.test->test_name); + + switch (env.watchdog_state) { + case WD_NOTIFY: + fprintf(env.stderr_saved, "WATCHDOG: test case %s executes for %d seconds...\n", + test_name, env.secs_till_notify); + timeout.it_value.tv_sec = env.secs_till_kill - env.secs_till_notify; + env.watchdog_state = WD_KILL; + err = timer_settime(env.watchdog, 0, &timeout, NULL); + if (err) + fprintf(env.stderr_saved, "Failed to arm watchdog timer\n"); + break; + case WD_KILL: + fprintf(env.stderr_saved, + "WATCHDOG: test case %s executes for %d seconds, terminating with SIGSEGV\n", + test_name, env.secs_till_kill); + pthread_kill(env.main_thread, SIGSEGV); + break; + } +} + +static void watchdog_start(void) +{ + struct itimerspec timeout = {}; + int err; + + if (env.secs_till_kill == 0) + return; + if (env.secs_till_notify > 0) { + env.watchdog_state = WD_NOTIFY; + timeout.it_value.tv_sec = env.secs_till_notify; + } else { + env.watchdog_state = WD_KILL; + timeout.it_value.tv_sec = env.secs_till_kill; + } + err = timer_settime(env.watchdog, 0, &timeout, NULL); + if (err) + fprintf(env.stderr_saved, "Failed to start watchdog timer\n"); +} + +static void watchdog_stop(void) +{ + struct itimerspec timeout = {}; + int err; + + env.watchdog_state = WD_NOTIFY; + err = timer_settime(env.watchdog, 0, &timeout, NULL); + if (err) + fprintf(env.stderr_saved, "Failed to stop watchdog timer\n"); +} + +static void watchdog_init(void) +{ + struct sigevent watchdog_sev = { + .sigev_notify = SIGEV_THREAD, + .sigev_notify_function = watchdog_timer_func, + }; + int err; + + env.main_thread = pthread_self(); + err = timer_create(CLOCK_MONOTONIC, &watchdog_sev, &env.watchdog); + if (err) + fprintf(stderr, "Failed to initialize watchdog timer\n"); +} + static bool should_run(struct test_selector *sel, int num, const char *name) { int i; @@ -512,6 +598,7 @@ bool test__start_subtest(const char *subtest_name) env.subtest_state = subtest_state; stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt); + watchdog_start(); return true; } @@ -777,6 +864,7 @@ enum ARG_KEYS { ARG_DEBUG = -1, ARG_JSON_SUMMARY = 'J', ARG_TRAFFIC_MONITOR = 'm', + ARG_WATCHDOG_TIMEOUT = 'w', }; static const struct argp_option opts[] = { @@ -807,6 +895,8 @@ static const struct argp_option opts[] = { { "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0, "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." 
}, #endif + { "watchdog-timeout", ARG_WATCHDOG_TIMEOUT, "SECONDS", 0, + "Kill the process if tests are not making progress for specified number of seconds." }, {}, }; @@ -868,6 +958,7 @@ static int libbpf_print_fn(enum libbpf_print_level level, va_copy(args2, args); vfprintf(libbpf_capture_stream, format, args2); + va_end(args2); } if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG) @@ -1031,6 +1122,16 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) true); break; #endif + case ARG_WATCHDOG_TIMEOUT: + env->secs_till_kill = atoi(arg); + if (env->secs_till_kill < 0) { + fprintf(stderr, "Invalid watchdog timeout: %s.\n", arg); + return -EINVAL; + } + if (env->secs_till_kill < env->secs_till_notify) { + env->secs_till_notify = 0; + } + break; default: return ARGP_ERR_UNKNOWN; } @@ -1259,10 +1360,12 @@ static void run_one_test(int test_num) stdio_hijack(&state->log_buf, &state->log_cnt); + watchdog_start(); if (test->run_test) test->run_test(); else if (test->run_serial_test) test->run_serial_test(); + watchdog_stop(); /* ensure last sub-test is finalized properly */ if (env.subtest_state) @@ -1703,6 +1806,7 @@ static int worker_main_send_subtests(int sock, struct test_state *state) static int worker_main(int sock) { save_netns(); + watchdog_init(); while (true) { /* receive command */ @@ -1812,6 +1916,8 @@ int main(int argc, char **argv) sigaction(SIGSEGV, &sigact, NULL); + env.secs_till_notify = 10; + env.secs_till_kill = 120; err = argp_parse(&argp, argc, argv, 0, NULL, &env); if (err) return err; @@ -1820,6 +1926,8 @@ int main(int argc, char **argv) if (err) return err; + watchdog_init(); + /* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); libbpf_set_print(libbpf_print_fn); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 7767d9a825ae..74de33ae37e5 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -131,6 +131,12 @@ struct test_env { pid_t *worker_pids; /* array of worker pids */ int *worker_socks; /* array of worker socks */ int *worker_current_test; /* array of current running test for each worker */ + + pthread_t main_thread; + int secs_till_notify; + int secs_till_kill; + timer_t watchdog; /* watch for stalled tests/subtests */ + enum { WD_NOTIFY, WD_KILL } watchdog_state; }; #define MAX_LOG_TRUNK_SIZE 8192 @@ -390,6 +396,14 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_ERR_FD(fd, name) ({ \ + static int duration = 0; \ + int ___fd = (fd); \ + bool ___ok = ___fd < 0; \ + CHECK(!___ok, (name), "unexpected fd: %d\n", ___fd); \ + ___ok; \ +}) + #define SYS(goto_label, fmt, ...) 
\ ({ \ char cmd[1024]; \ diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 610392dfc4fb..447b68509d76 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -42,10 +42,6 @@ #include "../../../include/linux/filter.h" #include "testing_helpers.h" -#ifndef ENOTSUPP -#define ENOTSUPP 524 -#endif - #define MAX_INSNS BPF_MAXINSNS #define MAX_EXPECTED_INSNS 32 #define MAX_UNEXPECTED_INSNS 32 diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c index c7828b13e5ff..dd38dc68f635 100644 --- a/tools/testing/selftests/bpf/uprobe_multi.c +++ b/tools/testing/selftests/bpf/uprobe_multi.c @@ -12,6 +12,10 @@ #define MADV_POPULATE_READ 22 #endif +#ifndef MADV_PAGEOUT +#define MADV_PAGEOUT 21 +#endif + int __attribute__((weak)) uprobe(void) { return 0; diff --git a/tools/testing/selftests/bpf/uptr_test_common.h b/tools/testing/selftests/bpf/uptr_test_common.h new file mode 100644 index 000000000000..f8a134ba12f9 --- /dev/null +++ b/tools/testing/selftests/bpf/uptr_test_common.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#ifndef _UPTR_TEST_COMMON_H +#define _UPTR_TEST_COMMON_H + +#define MAGIC_VALUE 0xabcd1234 +#define PAGE_SIZE 4096 + +#ifdef __BPF__ +/* Avoid fwd btf type being generated for the following struct */ +struct large_data *dummy_large_data; +struct empty_data *dummy_empty_data; +struct user_data *dummy_data; +struct cgroup *dummy_cgrp; +#else +#define __uptr +#define __kptr +#endif + +struct user_data { + int a; + int b; + int result; + int nested_result; +}; + +struct nested_udata { + struct user_data __uptr *udata; +}; + +struct value_type { + struct user_data __uptr *udata; + struct cgroup __kptr *cgrp; + struct nested_udata nested; +}; + +struct value_lock_type { + struct user_data __uptr *udata; + struct bpf_spin_lock lock; +}; + +struct large_data { + __u8 one_page[PAGE_SIZE]; + int a; +}; + +struct large_uptr { + struct large_data __uptr *udata; +}; + +struct empty_data { +}; + +struct empty_uptr { + struct empty_data __uptr *udata; +}; + +struct kstruct_uptr { + struct cgroup __uptr *cgrp; +}; + +#endif diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 1ec5c4c47235..e12ef953fba8 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -179,6 +180,7 @@ static struct env { int files_skipped; int progs_processed; int progs_skipped; + int top_src_lines; } env; static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) @@ -228,6 +230,7 @@ static const struct argp_option opts[] = { "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" }, { "test-reg-invariants", 'r', NULL, 0, "Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" }, + { "top-src-lines", 'S', "N", 0, "Emit N most frequent source code lines" }, {}, }; @@ -327,6 +330,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) return err; } break; + case 'S': + errno = 0; + env.top_src_lines = strtol(arg, NULL, 10); + if (errno) { + fprintf(stderr, "invalid top lines N specifier: %s\n", arg); + argp_usage(state); + } + break; case ARGP_KEY_ARG: tmp = realloc(env.filenames, (env.filename_cnt + 
 		if (!tmp)
@@ -854,6 +865,118 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *
 	return 0;
 }
 
+struct line_cnt {
+	char *line;
+	int cnt;
+};
+
+static int str_cmp(const void *a, const void *b)
+{
+	const char **str1 = (const char **)a;
+	const char **str2 = (const char **)b;
+
+	return strcmp(*str1, *str2);
+}
+
+static int line_cnt_cmp(const void *a, const void *b)
+{
+	const struct line_cnt *a_cnt = (const struct line_cnt *)a;
+	const struct line_cnt *b_cnt = (const struct line_cnt *)b;
+
+	if (a_cnt->cnt != b_cnt->cnt)
+		return a_cnt->cnt < b_cnt->cnt ? -1 : 1;
+	return strcmp(a_cnt->line, b_cnt->line);
+}
+
+static int print_top_src_lines(char * const buf, size_t buf_sz, const char *prog_name)
+{
+	int lines_cap = 0;
+	int lines_size = 0;
+	char **lines = NULL;
+	char *line = NULL;
+	char *state;
+	struct line_cnt *freq = NULL;
+	struct line_cnt *cur;
+	int unique_lines;
+	int err = 0;
+	int i;
+
+	while ((line = strtok_r(line ? NULL : buf, "\n", &state))) {
+		if (strncmp(line, "; ", 2) != 0)
+			continue;
+		line += 2;
+
+		if (lines_size == lines_cap) {
+			char **tmp;
+
+			lines_cap = max(16, lines_cap * 2);
+			tmp = realloc(lines, lines_cap * sizeof(*tmp));
+			if (!tmp) {
+				err = -ENOMEM;
+				goto cleanup;
+			}
+			lines = tmp;
+		}
+		lines[lines_size] = line;
+		lines_size++;
+	}
+
+	if (lines_size == 0)
+		goto cleanup;
+
+	qsort(lines, lines_size, sizeof(*lines), str_cmp);
+
+	freq = calloc(lines_size, sizeof(*freq));
+	if (!freq) {
+		err = -ENOMEM;
+		goto cleanup;
+	}
+
+	cur = freq;
+	cur->line = lines[0];
+	cur->cnt = 1;
+	for (i = 1; i < lines_size; ++i) {
+		if (strcmp(lines[i], cur->line) != 0) {
+			cur++;
+			cur->line = lines[i];
+			cur->cnt = 0;
+		}
+		cur->cnt++;
+	}
+	unique_lines = cur - freq + 1;
+
+	qsort(freq, unique_lines, sizeof(struct line_cnt), line_cnt_cmp);
+
+	printf("Top source lines (%s):\n", prog_name);
+	for (i = 0; i < min(unique_lines, env.top_src_lines); ++i) {
+		const char *src_code = freq[i].line;
+		const char *src_line = NULL;
+		char *split = strrchr(freq[i].line, '@');
+
+		if (split) {
+			src_line = split + 1;
+
+			while (*src_line && isspace(*src_line))
+				src_line++;
+
+			while (split > src_code && isspace(*split))
+				split--;
+			*split = '\0';
+		}
+
+		if (src_line)
+			printf("%5d: (%s)\t%s\n", freq[i].cnt, src_line, src_code);
+		else
+			printf("%5d: %s\n", freq[i].cnt, src_code);
+	}
+	printf("\n");
+
+cleanup:
+	free(freq);
+	free(lines);
+	return err;
+}
+
 static int guess_prog_type_by_ctx_name(const char *ctx_name,
 				       enum bpf_prog_type *prog_type,
 				       enum bpf_attach_type *attach_type)
@@ -987,6 +1110,35 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
 	return;
 }
 
+static int max_verifier_log_size(void)
+{
+	const int SMALL_LOG_SIZE = UINT_MAX >> 8;
+	const int BIG_LOG_SIZE = UINT_MAX >> 2;
+	struct bpf_insn insns[] = {
+		{ .code = BPF_ALU | BPF_MOV | BPF_X, .dst_reg = BPF_REG_0, },
+		{ .code = BPF_JMP | BPF_EXIT, },
+	};
+	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+		    .log_size = BIG_LOG_SIZE,
+		    .log_buf = (void *)-1,
+		    .log_level = 4
+	);
+	int ret, insn_cnt = ARRAY_SIZE(insns);
+	static int log_size;
+
+	if (log_size != 0)
+		return log_size;
+
+	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+
+	if (ret == -EFAULT)
+		log_size = BIG_LOG_SIZE;
+	else /* ret == -EINVAL, big log size is not supported by the verifier */
+		log_size = SMALL_LOG_SIZE;
+
+	return log_size;
+}
+
 static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
 {
 	const char *base_filename = basename(strdupa(filename));
@@ -1009,13 +1161,16 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 	stats = &env.prog_stats[env.prog_stat_cnt++];
 	memset(stats, 0, sizeof(*stats));
 
-	if (env.verbose) {
-		buf_sz = env.log_size ? env.log_size : 16 * 1024 * 1024;
+	if (env.verbose || env.top_src_lines > 0) {
+		buf_sz = env.log_size ? env.log_size : max_verifier_log_size();
 		buf = malloc(buf_sz);
 		if (!buf)
 			return -ENOMEM;
 		/* ensure we always request stats */
 		log_level = env.log_level | 4 | (env.log_fixed ? 8 : 0);
+		/* --top-src-lines needs verifier log */
+		if (env.top_src_lines > 0 && env.log_level == 0)
+			log_level |= 2;
 	} else {
 		buf = verif_log_buf;
 		buf_sz = sizeof(verif_log_buf);
@@ -1048,6 +1203,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 			       filename, prog_name, stats->stats[DURATION],
 			       err ? "failure" : "success", buf);
 	}
+	if (env.top_src_lines > 0)
+		print_top_src_lines(buf, buf_sz, stats->prog_name);
 
 	if (verif_log_buf != buf)
 		free(buf);