Merge branch 'handle-possible-null-trusted-raw_tp-arguments'

Kumar Kartikeya Dwivedi says:

====================
Handle possible NULL trusted raw_tp arguments

More context is available in [0], but the TL;DR is that the verifier
incorrectly assumes that any raw tracepoint argument will always be
non-NULL. This means that even when users correctly check a possibly
NULL argument, the verifier can remove the NULL check due to its
incorrect knowledge of the pointer's NULL-ness. Secondly, kernel
helpers and kfuncs taking these trusted tracepoint arguments
incorrectly assume that all arguments will always be valid, non-NULL
pointers.

In this set, we mark raw_tp arguments as PTR_MAYBE_NULL on top of
PTR_TRUSTED, but special-case their behavior when they are dereferenced
or used in pointer arithmetic. When passing trusted args to helpers or
kfuncs, raw_tp programs are permitted to pass such possibly NULL
pointers.

Any load of such a maybe-NULL trusted PTR_TO_BTF_ID is promoted to a
PROBE_MEM load so that any resulting page fault is handled gracefully.
The verifier ensures that NULL checks on such pointers are preserved
and are not removed by dead code elimination.
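
To make the user-visible effect concrete, here is a minimal sketch in
the spirit of the selftest added later in this set (the tracepoint is
the new bpf_testmod hook; program and variable names are illustrative):

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  int seen;

  SEC("tp_btf/bpf_testmod_test_raw_tp_null")
  int BPF_PROG(sketch_raw_tp_null, struct sk_buff *skb)
  {
          /* Unconditional deref of the maybe-NULL trusted arg: the
           * verifier now accepts it and emits a PROBE_MEM load, so a
           * NULL skb faults gracefully instead of oopsing.
           */
          seen += skb->mark;

          /* Compiler barrier, as in the selftest below: keep the
           * compiler from moving the NULL check ahead of the load.
           */
          asm volatile("" ::: "memory");

          /* The verifier no longer assumes skb != NULL, so this check
           * is preserved rather than pruned as dead code.
           */
          if (!skb)
                  seen += 2;
          return 0;
  }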

This new behavior is not applied when ref_obj_id is non-zero, as those
pointers do not belong to raw_tp arguments, but are instead acquired
objects.

Since helpers and kfuncs already require attention for PTR_TO_BTF_ID
(non-trusted) pointers, we do not implement any protection for such
cases in this patch set, and leave it as future work for an upcoming
series.

A selftest is included with this patch set to verify the new behavior,
and it crashes the kernel without the first patch.

 [0]: https://lore.kernel.org/bpf/CAADnVQLMPPavJQR6JFsi3dtaaLHB816JN4HCV_TFWohJ61D+wQ@mail.gmail.com

Changelog:
----------
v2 -> v3
v2: https://lore.kernel.org/bpf/20241103184144.3765700-1-memxor@gmail.com

 * Fix overly lenient check around check_ptr_to_btf_access that allowed
   any PTR_TO_BTF_ID with PTR_MAYBE_NULL to be dereferenced.
 * Add Juri's Tested-by and Jiri's Reviewed-by tags.

v1 -> v2
v1: https://lore.kernel.org/bpf/20241101000017.3424165-1-memxor@gmail.com

 * Add patch to clean up users of gettid (Andrii)
 * Avoid nested blocks in selftest (Andrii)
 * Prevent code motion optimization in selftest using barrier()
====================

Link: https://lore.kernel.org/r/20241104171959.2938862-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov 2024-11-04 11:37:36 -08:00
commit 1850ce1bdd
20 changed files with 187 additions and 31 deletions


@ -3495,4 +3495,10 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
{
return prog->type == BPF_PROG_TYPE_TRACING &&
prog->expected_attach_type == BPF_TRACE_RAW_TP;
}
#endif /* _LINUX_BPF_H */


@ -6588,7 +6588,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (prog_args_trusted(prog))
info->reg_type |= PTR_TRUSTED;
if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
/* Raw tracepoint arguments always get marked as maybe NULL */
if (bpf_prog_is_raw_tp(prog))
info->reg_type |= PTR_MAYBE_NULL;
else if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
info->reg_type |= PTR_MAYBE_NULL;
if (tgt_prog) {


@ -418,6 +418,25 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
return rec;
}
static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) {
return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) &&
bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id;
}
static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
if (!mask_raw_tp_reg_cond(env, reg))
return false;
reg->type &= ~PTR_MAYBE_NULL;
return true;
}
static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result)
{
if (result)
reg->type |= PTR_MAYBE_NULL;
}
static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
@ -6622,6 +6641,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
const char *field_name = NULL;
enum bpf_type_flag flag = 0;
u32 btf_id = 0;
bool mask;
int ret;
if (!env->allow_ptr_leaks) {
@ -6693,7 +6713,21 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
/* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL
* trusted PTR_TO_BTF_ID; these are the ones that are possibly
* arguments to the raw_tp. Since the internal checks for a trusted
* reg in check_ptr_to_btf_access would consider the PTR_MAYBE_NULL
* modifier problematic, mask it out temporarily for the
* check. Don't apply this to pointers with ref_obj_id > 0, as
* those won't be raw_tp args.
*
* We may end up applying this relaxation to other trusted
* PTR_TO_BTF_ID with the maybe-null flag, since we cannot
* distinguish PTR_MAYBE_NULL tagged for arguments from normal
* tagging, but that should expand allowed behavior and not
* cause a regression in existing behavior.
*/
mask = mask_raw_tp_reg(env, reg);
if (ret != PTR_TO_BTF_ID) {
/* just mark; */
@ -6754,8 +6788,13 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
clear_trusted_flags(&flag);
}
if (atype == BPF_READ && value_regno >= 0)
if (atype == BPF_READ && value_regno >= 0) {
mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
/* We've assigned a new type to regno, so don't undo masking. */
if (regno == value_regno)
mask = false;
}
unmask_raw_tp_reg(reg, mask);
return 0;
}
@ -7140,7 +7179,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
!type_may_be_null(reg->type)) {
(mask_raw_tp_reg_cond(env, reg) || !type_may_be_null(reg->type))) {
err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
value_regno);
} else if (reg->type == CONST_PTR_TO_MAP) {
@ -8833,6 +8872,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
enum bpf_reg_type type = reg->type;
u32 *arg_btf_id = NULL;
int err = 0;
bool mask;
if (arg_type == ARG_DONTCARE)
return 0;
@ -8873,11 +8913,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
arg_btf_id = fn->arg_btf_id[arg];
mask = mask_raw_tp_reg(env, reg);
err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
if (err)
return err;
err = check_func_arg_reg_off(env, reg, regno, arg_type);
err = err ?: check_func_arg_reg_off(env, reg, regno, arg_type);
unmask_raw_tp_reg(reg, mask);
if (err)
return err;
@ -9672,14 +9712,17 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
return ret;
} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
struct bpf_call_arg_meta meta;
bool mask;
int err;
if (register_is_null(reg) && type_may_be_null(arg->arg_type))
continue;
memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
mask = mask_raw_tp_reg(env, reg);
err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
unmask_raw_tp_reg(reg, mask);
if (err)
return err;
} else {
@ -12007,6 +12050,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
enum bpf_arg_type arg_type = ARG_DONTCARE;
u32 regno = i + 1, ref_id, type_size;
bool is_ret_buf_sz = false;
bool mask = false;
int kf_arg_type;
t = btf_type_skip_modifiers(btf, args[i].type, NULL);
@ -12065,12 +12109,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return -EINVAL;
}
mask = mask_raw_tp_reg(env, reg);
if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
(register_is_null(reg) || type_may_be_null(reg->type)) &&
!is_kfunc_arg_nullable(meta->btf, &args[i])) {
verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
unmask_raw_tp_reg(reg, mask);
return -EACCES;
}
unmask_raw_tp_reg(reg, mask);
if (reg->ref_obj_id) {
if (is_kfunc_release(meta) && meta->ref_obj_id) {
@ -12128,16 +12175,24 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
break;
/* Allow passing maybe NULL raw_tp arguments to
* kfuncs for compatibility. Don't apply this to
* arguments with ref_obj_id > 0.
*/
mask = mask_raw_tp_reg(env, reg);
if (!is_trusted_reg(reg)) {
if (!is_kfunc_rcu(meta)) {
verbose(env, "R%d must be referenced or trusted\n", regno);
unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
if (!is_rcu_reg(reg)) {
verbose(env, "R%d must be a rcu pointer\n", regno);
unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
}
unmask_raw_tp_reg(reg, mask);
fallthrough;
case KF_ARG_PTR_TO_CTX:
case KF_ARG_PTR_TO_DYNPTR:
@ -12160,7 +12215,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (is_kfunc_release(meta) && reg->ref_obj_id)
arg_type |= OBJ_RELEASE;
mask = mask_raw_tp_reg(env, reg);
ret = check_func_arg_reg_off(env, reg, regno, arg_type);
unmask_raw_tp_reg(reg, mask);
if (ret < 0)
return ret;
@ -12337,6 +12394,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
ref_tname = btf_name_by_offset(btf, ref_t->name_off);
fallthrough;
case KF_ARG_PTR_TO_BTF_ID:
mask = mask_raw_tp_reg(env, reg);
/* Only base_type is checked, further checks are done here */
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
(bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
@ -12345,9 +12403,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
verbose(env, "expected %s or socket\n",
reg_type_str(env, base_type(reg->type) |
(type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
unmask_raw_tp_reg(reg, mask);
if (ret < 0)
return ret;
break;
@ -13320,7 +13380,7 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
*/
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_verifier_state *vstate = env->cur_state;
@ -13334,6 +13394,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
bool mask;
int ret;
dst_reg = &regs[dst];
@ -13360,11 +13421,14 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return -EACCES;
}
mask = mask_raw_tp_reg(env, ptr_reg);
if (ptr_reg->type & PTR_MAYBE_NULL) {
verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
dst, reg_type_str(env, ptr_reg->type));
unmask_raw_tp_reg(ptr_reg, mask);
return -EACCES;
}
unmask_raw_tp_reg(ptr_reg, mask);
switch (base_type(ptr_reg->type)) {
case PTR_TO_CTX:
@ -19866,6 +19930,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
* for this case.
*/
case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
if (type == BPF_READ) {
if (BPF_MODE(insn->code) == BPF_MEM)
insn->code = BPF_LDX | BPF_PROBE_MEM |


@ -4,6 +4,7 @@
#include <argp.h>
#include <unistd.h>
#include <stdint.h>
#include "bpf_util.h"
#include "bench.h"
#include "trigger_bench.skel.h"
#include "trace_helpers.h"
@ -72,7 +73,7 @@ static __always_inline void inc_counter(struct counter *counters)
unsigned slot;
if (unlikely(tid == 0))
tid = syscall(SYS_gettid);
tid = sys_gettid();
/* multiplicative hashing, it's fast */
slot = 2654435769U * tid;


@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare,
TP_ARGS(ctx__nullable)
);
struct sk_buff;
DECLARE_TRACE(bpf_testmod_test_raw_tp_null,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb)
);
#undef BPF_TESTMOD_DECLARE_TRACE
#ifdef DECLARE_TRACE_WRITABLE
#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \


@ -380,6 +380,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);
(void)trace_bpf_testmod_test_raw_tp_null(NULL);
struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
sizeof(int)), GFP_KERNEL);
if (struct_arg3 != NULL) {


@ -6,6 +6,7 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <syscall.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
static inline unsigned int bpf_num_possible_cpus(void)
@ -59,4 +60,12 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
#endif
/* Availability of gettid across glibc versions is hit-and-miss, so
* fall back to the syscall in this macro and use it everywhere.
*/
#ifndef sys_gettid
#define sys_gettid() syscall(SYS_gettid)
#endif
#endif /* __BPF_UTIL__ */


@ -12,6 +12,7 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "test_maps.h"
#include "task_local_storage_helpers.h"
#include "read_bpf_task_storage_busy.skel.h"
@ -115,7 +116,7 @@ void test_task_storage_map_stress_lookup(void)
CHECK(err, "attach", "error %d\n", err);
/* Trigger program */
syscall(SYS_gettid);
sys_gettid();
skel->bss->pid = 0;
CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);


@ -690,7 +690,7 @@ void test_bpf_cookie(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->my_tid = syscall(SYS_gettid);
skel->bss->my_tid = sys_gettid();
if (test__start_subtest("kprobe"))
kprobe_subtest(skel);


@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
"pthread_create");
skel->bss->tid = syscall(SYS_gettid);
skel->bss->tid = sys_gettid();
do_dummy_read_opts(skel->progs.dump_task, opts);
@ -255,10 +255,10 @@ static void *run_test_task_tid(void *arg)
union bpf_iter_link_info linfo;
int num_unknown_tid, num_known_tid;
ASSERT_NEQ(getpid(), syscall(SYS_gettid), "check_new_thread_id");
ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");
memset(&linfo, 0, sizeof(linfo));
linfo.task.tid = syscall(SYS_gettid);
linfo.task.tid = sys_gettid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
test_task_common(&opts, 0, 1);


@ -63,14 +63,14 @@ static void test_tp_btf(int cgroup_fd)
if (!ASSERT_OK(err, "map_delete_elem"))
goto out;
skel->bss->target_pid = syscall(SYS_gettid);
skel->bss->target_pid = sys_gettid();
err = cgrp_ls_tp_btf__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_gettid);
syscall(SYS_gettid);
sys_gettid();
sys_gettid();
skel->bss->target_pid = 0;
@ -154,7 +154,7 @@ static void test_recursion(int cgroup_fd)
goto out;
/* trigger sys_enter, make sure it does not cause deadlock */
syscall(SYS_gettid);
sys_gettid();
out:
cgrp_ls_recursion__destroy(skel);
@ -224,7 +224,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
return;
CGROUP_MODE_SET(skel);
skel->bss->target_pid = syscall(SYS_gettid);
skel->bss->target_pid = sys_gettid();
bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
err = cgrp_ls_sleepable__load(skel);


@ -1010,7 +1010,7 @@ static void run_core_reloc_tests(bool use_btfgen)
struct data *data;
void *mmap_data = NULL;
my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";


@ -20,7 +20,7 @@ void test_linked_funcs(void)
bpf_program__set_autoload(skel->progs.handler1, true);
bpf_program__set_autoload(skel->progs.handler2, true);
skel->rodata->my_tid = syscall(SYS_gettid);
skel->rodata->my_tid = sys_gettid();
skel->bss->syscall_id = SYS_getpgid;
err = linked_funcs__load(skel);


@ -23,7 +23,7 @@ static int get_pid_tgid(pid_t *pid, pid_t *tgid,
struct stat st;
int err;
*pid = syscall(SYS_gettid);
*pid = sys_gettid();
*tgid = getpid();
err = stat("/proc/self/ns/pid", &st);


@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "raw_tp_null.skel.h"
void test_raw_tp_null(void)
{
struct raw_tp_null *skel;
skel = raw_tp_null__open_and_load();
if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load"))
return;
skel->bss->tid = sys_gettid();
if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach"))
goto end;
ASSERT_OK(trigger_module_test_read(2), "trigger testmod read");
ASSERT_EQ(skel->bss->i, 3, "invocations");
end:
raw_tp_null__destroy(skel);
}


@ -21,7 +21,7 @@ static void test_success(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
skel->bss->target_pid = sys_gettid();
bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
bpf_program__set_autoload(skel->progs.task_succ, true);
@ -58,7 +58,7 @@ static void test_rcuptr_acquire(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
skel->bss->target_pid = sys_gettid();
bpf_program__set_autoload(skel->progs.task_acquire, true);
err = rcu_read_lock__load(skel);


@ -31,14 +31,14 @@ static void test_sys_enter_exit(void)
if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
return;
skel->bss->target_pid = syscall(SYS_gettid);
skel->bss->target_pid = sys_gettid();
err = task_local_storage__attach(skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
syscall(SYS_gettid);
syscall(SYS_gettid);
sys_gettid();
sys_gettid();
/* 3x syscalls: 1x attach and 2x gettid */
ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
@ -107,7 +107,7 @@ static void test_recursion(void)
/* trigger sys_enter, make sure it does not cause deadlock */
skel->bss->test_pid = getpid();
syscall(SYS_gettid);
sys_gettid();
skel->bss->test_pid = 0;
task_ls_recursion__detach(skel);
@ -262,7 +262,7 @@ static void test_uptr_basic(void)
__u64 ev_dummy_data = 1;
int err;
my_tid = syscall(SYS_gettid);
my_tid = sys_gettid();
parent_task_fd = sys_pidfd_open(my_tid, 0);
if (!ASSERT_OK_FD(parent_task_fd, "parent_task_fd"))
return;


@ -125,7 +125,7 @@ static void *child_thread(void *ctx)
struct child *child = ctx;
int c = 0, err;
child->tid = syscall(SYS_gettid);
child->tid = sys_gettid();
/* let parent know we are ready */
err = write(child->c2p[1], &c, 1);


@ -0,0 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int tid;
int i;
SEC("tp_btf/bpf_testmod_test_raw_tp_null")
int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
{
struct task_struct *task = bpf_get_current_task_btf();
if (task->pid != tid)
return 0;
i = i + skb->mark + 1;
/* The compiler may move the NULL check before this deref, which causes
* the load to fail as deref of scalar. Prevent that by using a barrier.
*/
barrier();
/* If dead code elimination kicks in, the increment below will
* be removed. For raw_tp programs, we mark input arguments as
* PTR_MAYBE_NULL, so branch prediction should never kick in.
*/
if (!skb)
i += 2;
return 0;
}


@ -7,7 +7,11 @@
#include "bpf_misc.h"
SEC("tp_btf/bpf_testmod_test_nullable_bare")
__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
/* This used to be a failure test, but raw_tp nullable arguments can now
* directly be dereferenced, whether they have nullable annotation or not,
* and don't need to be explicitly checked.
*/
__success
int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
{
return nullable_ctx->len;