bpf: Remove bpf_kfunc_call_test_kptr_get() test kfunc
We've managed to improve the UX for kptrs significantly over the last 9 months. All of the prior main use cases, struct bpf_cpumask *, struct task_struct *, and struct cgroup *, have been updated to be synchronized mainly using RCU. In other words, their KF_ACQUIRE kfunc calls are all KF_RCU, and the pointers themselves are MEM_RCU and can be accessed in an RCU read region in BPF.

In a follow-on change, we'll be removing the KF_KPTR_GET kfunc flag. This patch prepares for that by removing the bpf_kfunc_call_test_kptr_get() kfunc and all associated selftests.

Signed-off-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/r/20230416084928.326135-2-void@manifault.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent 7a0788fe83
commit 09b501d905
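For context on the commit message above, the RCU-based pattern that makes KF_KPTR_GET unnecessary looks roughly like the sketch below: instead of calling a KF_KPTR_GET kfunc on a pointer to the map slot, the program loads the kptr field inside an RCU read section and, if it needs a full reference, passes the resulting MEM_RCU pointer to a KF_RCU acquire kfunc. This is a minimal illustration and not part of the patch; the map name, the tp_btf hook, and the assumption that another program stored a task into the map with bpf_kptr_xchg() are all invented for the example.

/* Minimal sketch (not from this patch): reading an RCU-protected task kptr. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct map_value {
	struct task_struct __kptr *task;	/* assumed to be filled elsewhere via bpf_kptr_xchg() */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} task_map SEC(".maps");

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(read_stored_task, struct task_struct *task, u64 clone_flags)
{
	struct map_value *v;
	struct task_struct *t;
	int key = 0;

	v = bpf_map_lookup_elem(&task_map, &key);
	if (!v)
		return 0;

	bpf_rcu_read_lock();
	/* Loading the kptr field of an RCU-protected type inside the RCU read
	 * section yields an MEM_RCU pointer, so no kptr_get-style kfunc is
	 * needed to read it.
	 */
	t = v->task;
	if (t) {
		/* bpf_task_acquire() is KF_RCU, so the MEM_RCU pointer is a
		 * valid argument when a full reference is needed.
		 */
		t = bpf_task_acquire(t);
		if (t)
			bpf_task_release(t);
	}
	bpf_rcu_read_unlock();
	return 0;
}

char _license[] SEC("license") = "GPL";

The acquire/release pair is shown only to demonstrate that a KF_RCU acquire kfunc accepts the RCU-protected pointer; a program that only reads fields under the lock would not need it.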
@@ -679,17 +679,6 @@ __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
 {
 }
 
-__bpf_kfunc struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
-{
-	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
-
-	if (!p)
-		return NULL;
-	refcount_inc(&p->cnt);
-	return p;
-}
-
 struct prog_test_pass1 {
 	int x0;
 	struct {
@@ -804,7 +793,6 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
-BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
@@ -115,8 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo
 DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
-extern struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
 void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym;
 
@@ -187,25 +185,10 @@ static void test_kptr_ref(struct map_value *v)
 	bpf_kfunc_call_test_release(p);
 }
 
-static void test_kptr_get(struct map_value *v)
-{
-	struct prog_test_ref_kfunc *p;
-
-	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	if (!p)
-		return;
-	if (p->a + p->b > 100) {
-		bpf_kfunc_call_test_release(p);
-		return;
-	}
-	bpf_kfunc_call_test_release(p);
-}
-
 static void test_kptr(struct map_value *v)
 {
 	test_kptr_unref(v);
 	test_kptr_ref(v);
-	test_kptr_get(v);
 }
 
 SEC("tc")
@@ -338,38 +321,25 @@ int test_map_kptr_ref_pre(struct map_value *v)
 	if (p_st->cnt.refs.counter != ref)
 		return 4;
 
-	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	if (!p)
-		return 5;
-	ref++;
-	if (p_st->cnt.refs.counter != ref) {
-		ret = 6;
-		goto end;
-	}
-	bpf_kfunc_call_test_release(p);
-	ref--;
-	if (p_st->cnt.refs.counter != ref)
-		return 7;
-
 	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
 	if (!p)
-		return 8;
+		return 5;
 	bpf_kfunc_call_test_release(p);
 	ref--;
 	if (p_st->cnt.refs.counter != ref)
-		return 9;
+		return 6;
 
 	p = bpf_kfunc_call_test_acquire(&arg);
 	if (!p)
-		return 10;
+		return 7;
 	ref++;
 	p = bpf_kptr_xchg(&v->ref_ptr, p);
 	if (p) {
-		ret = 11;
+		ret = 8;
 		goto end;
 	}
 	if (p_st->cnt.refs.counter != ref)
-		return 12;
+		return 9;
 	/* Leave in map */
 
 	return 0;
@@ -21,8 +21,6 @@ struct array_map {
 
 extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
 extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
-extern struct prog_test_ref_kfunc *
-bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 
 SEC("?tc")
 __failure __msg("kptr access size must be BPF_DW")
@@ -220,67 +218,6 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
 	return 0;
 }
 
-SEC("?tc")
-__failure __msg("arg#0 expected pointer to map value")
-int reject_kptr_get_no_map_val(struct __sk_buff *ctx)
-{
-	bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 expected pointer to map value")
-int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx)
-{
-	bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 no referenced kptr at map value offset=0")
-int reject_kptr_get_no_kptr(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get((void *)v, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("arg#0 no referenced kptr at map value offset=8")
-int reject_kptr_get_on_unref(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0);
-	return 0;
-}
-
-SEC("?tc")
-__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0")
-int reject_kptr_get_bad_type_match(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0);
-	return 0;
-}
-
 SEC("?tc")
 __failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_")
 int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
@@ -428,21 +365,6 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx)
 	return 0;
 }
 
-SEC("?tc")
-__failure __msg("Unreleased reference id=3 alloc_insn=")
-int kptr_get_ref_state(struct __sk_buff *ctx)
-{
-	struct map_value *v;
-	int key = 0;
-
-	v = bpf_map_lookup_elem(&array_map, &key);
-	if (!v)
-		return 0;
-
-	bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
-	return 0;
-}
-
 SEC("?tc")
 __failure __msg("Possibly NULL pointer passed to helper arg2")
 int kptr_xchg_possibly_null(struct __sk_buff *ctx)
@@ -288,33 +288,6 @@
 	.result = REJECT,
 	.errstr = "off=0 kptr isn't referenced kptr",
 },
-{
-	"map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
-	.insns = {
-	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-	BPF_LD_MAP_FD(BPF_REG_6, 0),
-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
-	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
-	BPF_EXIT_INSN(),
-	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-	BPF_MOV64_IMM(BPF_REG_2, 0),
-	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
-	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_EXIT_INSN(),
-	},
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-	.fixup_map_kptr = { 1 },
-	.result = REJECT,
-	.errstr = "arg#0 no referenced kptr at map value offset=0",
-	.fixup_kfunc_btf_id = {
-		{ "bpf_kfunc_call_test_kptr_get", 13 },
-	}
-},
 /* Tests for referenced PTR_TO_BTF_ID */
 {
 	"map_kptr: ref: loaded pointer marked as untrusted",