Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 09:12:07 +00:00)
Merge branch 'refactor-lock-management'
Kumar Kartikeya Dwivedi says:

====================
Refactor lock management

This set refactors lock management in the verifier in preparation for
spin locks that can be acquired multiple times. In addition to this, the
special-case reference leak logic for callbacks, which is no longer
necessary, is also dropped. See patches for details.

Changelog:
----------
v5 -> v6
v5: https://lore.kernel.org/bpf/20241109225243.2306756-1-memxor@gmail.com
 * Move active_locks mutation to {acquire,release}_lock_state (Alexei)

v4 -> v5
v4: https://lore.kernel.org/bpf/20241109074347.1434011-1-memxor@gmail.com
 * Make active_locks part of bpf_func_state (Alexei)
 * Remove unneeded in_callback_fn logic for references

v3 -> v4
v3: https://lore.kernel.org/bpf/20241104151716.2079893-1-memxor@gmail.com
 * Address comments from Alexei
   * Drop struct bpf_active_lock definition
   * Name enum type, expand definition to multiple lines
   * s/REF_TYPE_BPF_LOCK/REF_TYPE_LOCK/g
   * Change active_lock type to int
   * Fix type of 'type' in acquire_lock_state
   * Filter by taking type explicitly in find_lock_state
   * WARN for default case in refsafe switch statement

v2 -> v3
v2: https://lore.kernel.org/bpf/20241103212252.547071-1-memxor@gmail.com
 * Rebase on bpf-next to resolve merge conflict

v1 -> v2
v1: https://lore.kernel.org/bpf/20241103205856.345580-1-memxor@gmail.com
 * Fix refsafe state comparison to check callback_ref and ptr separately.
====================

Link: https://lore.kernel.org/r/20241109231430.2475236-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
This commit is contained in commit 7b6e5bfa25
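At a glance, the series replaces the single bpf_verifier_state::active_lock slot with per-frame bpf_reference_state entries of type REF_TYPE_LOCK plus an active_locks counter in bpf_func_state. The stand-alone C sketch below is illustration only, not kernel code: the struct names func_state/ref_state, the fixed-size refs array, and the return values are simplifications made up here. It shows the bookkeeping model the diff below introduces, keyed on the (id, ptr) pair so that more than one held lock can be represented and an unlock must match its lock.

/* Simplified model of the new lock bookkeeping: every held lock becomes a
 * reference-state entry of type REF_TYPE_LOCK, keyed by (id, ptr), instead
 * of a single active_lock field.  Illustrative sketch only.
 */
#include <stdio.h>

enum ref_state_type {
	REF_TYPE_PTR = 0,	/* ordinary acquired reference */
	REF_TYPE_LOCK,		/* held bpf_spin_lock */
};

struct ref_state {
	enum ref_state_type type;
	int id;			/* reg->id of the lock/reference */
	void *ptr;		/* map or btf object the lock belongs to */
};

struct func_state {
	struct ref_state refs[8];
	int acquired_refs;
	int active_locks;
};

/* Record a held lock; several entries can coexist, which the old single
 * active_lock slot could not express. */
static int acquire_lock_state(struct func_state *s, int id, void *ptr)
{
	if (s->acquired_refs >= 8)
		return -1;
	s->refs[s->acquired_refs++] = (struct ref_state){ REF_TYPE_LOCK, id, ptr };
	s->active_locks++;
	return 0;
}

/* Drop the entry that matches on both id and ptr, mirroring the
 * "unlock must match the lock" check. */
static int release_lock_state(struct func_state *s, int id, void *ptr)
{
	for (int i = 0; i < s->acquired_refs; i++) {
		struct ref_state *r = &s->refs[i];

		if (r->type != REF_TYPE_LOCK || r->id != id || r->ptr != ptr)
			continue;
		*r = s->refs[--s->acquired_refs];	/* swap with last entry */
		s->active_locks--;
		return 0;
	}
	return -1;	/* bpf_spin_unlock of different lock */
}

int main(void)
{
	struct func_state st = {};
	int map_a, map_b;	/* stand-ins for two lock-owning objects */

	acquire_lock_state(&st, 1, &map_a);
	acquire_lock_state(&st, 2, &map_b);	/* second held lock is representable */
	printf("active_locks=%d\n", st.active_locks);			/* 2 */
	release_lock_state(&st, 1, &map_a);
	printf("mismatched unlock=%d\n", release_lock_state(&st, 2, &map_a)); /* -1 */
	release_lock_state(&st, 2, &map_b);
	printf("active_locks=%d\n", st.active_locks);			/* 0 */
	return 0;
}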
include/linux/bpf_verifier.h
@@ -48,22 +48,6 @@ enum bpf_reg_liveness {
 	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
 };
 
-/* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and conside them to not alias each other for the purposes of
- * tracking lock state.
- */
-struct bpf_active_lock {
-	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
-	 * there's no active lock held, and other fields have no
-	 * meaning. If non-NULL, it indicates that a lock is held and
-	 * id member has the reg->id of the register which can be >= 0.
-	 */
-	void *ptr;
-	/* This will be reg->id */
-	u32 id;
-};
-
 #define ITER_PREFIX "bpf_iter_"
 
 enum bpf_iter_state {
@@ -266,6 +250,13 @@ struct bpf_stack_state {
 };
 
 struct bpf_reference_state {
+	/* Each reference object has a type. Ensure REF_TYPE_PTR is zero to
+	 * default to pointer reference on zero initialization of a state.
+	 */
+	enum ref_state_type {
+		REF_TYPE_PTR = 0,
+		REF_TYPE_LOCK,
+	} type;
 	/* Track each reference created with a unique id, even if the same
 	 * instruction creates the reference multiple times (eg, via CALL).
 	 */
@@ -274,17 +265,10 @@ struct bpf_reference_state {
 	 * is used purely to inform the user of a reference leak.
 	 */
 	int insn_idx;
-	/* There can be a case like:
-	 * main (frame 0)
-	 *   cb (frame 1)
-	 *     func (frame 3)
-	 *       cb (frame 4)
-	 * Hence for frame 4, if callback_ref just stored boolean, it would be
-	 * impossible to distinguish nested callback refs. Hence store the
-	 * frameno and compare that to callback_ref in check_reference_leak when
-	 * exiting a callback function.
+	/* Use to keep track of the source object of a lock, to ensure
+	 * it matches on unlock.
 	 */
-	int callback_ref;
+	void *ptr;
 };
 
 struct bpf_retval_range {
@@ -332,6 +316,7 @@ struct bpf_func_state {
 
 	/* The following fields should be last. See copy_func_state() */
 	int acquired_refs;
+	int active_locks;
 	struct bpf_reference_state *refs;
 	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
 	 * (i.e. 8) bytes worth of stack memory.
@@ -434,7 +419,6 @@ struct bpf_verifier_state {
 	u32 insn_idx;
 	u32 curframe;
 
-	struct bpf_active_lock active_lock;
 	bool speculative;
 	bool active_rcu_lock;
 	u32 active_preempt_lock;
kernel/bpf/verifier.c
@@ -1284,6 +1284,7 @@ static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_fun
 	if (!dst->refs)
 		return -ENOMEM;
 
+	dst->active_locks = src->active_locks;
 	dst->acquired_refs = src->acquired_refs;
 	return 0;
 }
@@ -1354,13 +1355,32 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
 	if (err)
 		return err;
 	id = ++env->id_gen;
+	state->refs[new_ofs].type = REF_TYPE_PTR;
 	state->refs[new_ofs].id = id;
 	state->refs[new_ofs].insn_idx = insn_idx;
-	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
 
 	return id;
 }
 
+static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
+			      int id, void *ptr)
+{
+	struct bpf_func_state *state = cur_func(env);
+	int new_ofs = state->acquired_refs;
+	int err;
+
+	err = resize_reference_state(state, state->acquired_refs + 1);
+	if (err)
+		return err;
+	state->refs[new_ofs].type = type;
+	state->refs[new_ofs].id = id;
+	state->refs[new_ofs].insn_idx = insn_idx;
+	state->refs[new_ofs].ptr = ptr;
+
+	state->active_locks++;
+	return 0;
+}
+
 /* release function corresponding to acquire_reference_state(). Idempotent. */
 static int release_reference_state(struct bpf_func_state *state, int ptr_id)
 {
@@ -1368,10 +1388,9 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
 
 	last_idx = state->acquired_refs - 1;
 	for (i = 0; i < state->acquired_refs; i++) {
+		if (state->refs[i].type != REF_TYPE_PTR)
+			continue;
 		if (state->refs[i].id == ptr_id) {
-			/* Cannot release caller references in callbacks */
-			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
-				return -EINVAL;
 			if (last_idx && i != last_idx)
 				memcpy(&state->refs[i], &state->refs[last_idx],
 				       sizeof(*state->refs));
@@ -1383,6 +1402,45 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
 	return -EINVAL;
 }
 
+static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr)
+{
+	int i, last_idx;
+
+	last_idx = state->acquired_refs - 1;
+	for (i = 0; i < state->acquired_refs; i++) {
+		if (state->refs[i].type != type)
+			continue;
+		if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
+			if (last_idx && i != last_idx)
+				memcpy(&state->refs[i], &state->refs[last_idx],
+				       sizeof(*state->refs));
+			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
+			state->acquired_refs--;
+			state->active_locks--;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type,
+						   int id, void *ptr)
+{
+	struct bpf_func_state *state = cur_func(env);
+	int i;
+
+	for (i = 0; i < state->acquired_refs; i++) {
+		struct bpf_reference_state *s = &state->refs[i];
+
+		if (s->type == REF_TYPE_PTR || s->type != type)
+			continue;
+
+		if (s->id == id && s->ptr == ptr)
+			return s;
+	}
+	return NULL;
+}
+
 static void free_func_state(struct bpf_func_state *state)
 {
 	if (!state)
@@ -1453,8 +1511,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 	dst_state->active_preempt_lock = src->active_preempt_lock;
 	dst_state->in_sleepable = src->in_sleepable;
 	dst_state->curframe = src->curframe;
-	dst_state->active_lock.ptr = src->active_lock.ptr;
-	dst_state->active_lock.id = src->active_lock.id;
 	dst_state->branches = src->branches;
 	dst_state->parent = src->parent;
 	dst_state->first_insn_idx = src->first_insn_idx;
@@ -5442,7 +5498,7 @@ static bool in_sleepable(struct bpf_verifier_env *env)
 static bool in_rcu_cs(struct bpf_verifier_env *env)
 {
 	return env->cur_state->active_rcu_lock ||
-	       env->cur_state->active_lock.ptr ||
+	       cur_func(env)->active_locks ||
 	       !in_sleepable(env);
 }
 
@@ -7724,19 +7780,20 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
  * Since only one bpf_spin_lock is allowed the checks are simpler than
  * reg_is_refcounted() logic. The verifier needs to remember only
  * one spin_lock instead of array of acquired_refs.
- * cur_state->active_lock remembers which map value element or allocated
+ * cur_func(env)->active_locks remembers which map value element or allocated
  * object got locked and clears it after bpf_spin_unlock.
  */
 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 			     bool is_lock)
 {
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
-	struct bpf_verifier_state *cur = env->cur_state;
 	bool is_const = tnum_is_const(reg->var_off);
+	struct bpf_func_state *cur = cur_func(env);
 	u64 val = reg->var_off.value;
 	struct bpf_map *map = NULL;
 	struct btf *btf = NULL;
 	struct btf_record *rec;
+	int err;
 
 	if (!is_const) {
 		verbose(env,
@@ -7768,16 +7825,23 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 		return -EINVAL;
 	}
 	if (is_lock) {
-		if (cur->active_lock.ptr) {
+		void *ptr;
+
+		if (map)
+			ptr = map;
+		else
+			ptr = btf;
+
+		if (cur->active_locks) {
 			verbose(env,
 				"Locking two bpf_spin_locks are not allowed\n");
 			return -EINVAL;
 		}
-		if (map)
-			cur->active_lock.ptr = map;
-		else
-			cur->active_lock.ptr = btf;
-		cur->active_lock.id = reg->id;
+		err = acquire_lock_state(env, env->insn_idx, REF_TYPE_LOCK, reg->id, ptr);
+		if (err < 0) {
+			verbose(env, "Failed to acquire lock state\n");
+			return err;
+		}
 	} else {
 		void *ptr;
 
@@ -7786,20 +7850,17 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
 		else
 			ptr = btf;
 
-		if (!cur->active_lock.ptr) {
+		if (!cur->active_locks) {
 			verbose(env, "bpf_spin_unlock without taking a lock\n");
 			return -EINVAL;
 		}
-		if (cur->active_lock.ptr != ptr ||
-		    cur->active_lock.id != reg->id) {
+
+		if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) {
 			verbose(env, "bpf_spin_unlock of different lock\n");
 			return -EINVAL;
 		}
 
 		invalidate_non_owning_refs(env);
-
-		cur->active_lock.ptr = NULL;
-		cur->active_lock.id = 0;
 	}
 	return 0;
 }
@@ -9861,7 +9922,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		const char *sub_name = subprog_name(env, subprog);
 
 		/* Only global subprogs cannot be called with a lock held. */
-		if (env->cur_state->active_lock.ptr) {
+		if (cur_func(env)->active_locks) {
 			verbose(env, "global function calls are not allowed while holding a lock,\n"
 				     "use static function instead\n");
 			return -EINVAL;
@@ -10202,17 +10263,10 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 		caller->regs[BPF_REG_0] = *r0;
 	}
 
-	/* callback_fn frame should have released its own additions to parent's
-	 * reference state at this point, or check_reference_leak would
-	 * complain, hence it must be the same as the caller. There is no need
-	 * to copy it back.
-	 */
-	if (!callee->in_callback_fn) {
-		/* Transfer references to the caller */
-		err = copy_reference_state(caller, callee);
-		if (err)
-			return err;
-	}
+	/* Transfer references to the caller */
+	err = copy_reference_state(caller, callee);
+	if (err)
+		return err;
 
 	/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
 	 * there function call logic would reschedule callback visit. If iteration
@@ -10382,11 +10436,11 @@ static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exi
 	bool refs_lingering = false;
 	int i;
 
-	if (!exception_exit && state->frameno && !state->in_callback_fn)
+	if (!exception_exit && state->frameno)
 		return 0;
 
 	for (i = 0; i < state->acquired_refs; i++) {
-		if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
+		if (state->refs[i].type != REF_TYPE_PTR)
 			continue;
 		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
 			state->refs[i].id, state->refs[i].insn_idx);
@@ -10399,7 +10453,7 @@ static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit
 {
 	int err;
 
-	if (check_lock && env->cur_state->active_lock.ptr) {
+	if (check_lock && cur_func(env)->active_locks) {
 		verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix);
 		return -EINVAL;
 	}
@@ -11620,10 +11674,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
 
 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
-	struct bpf_verifier_state *state = env->cur_state;
 	struct btf_record *rec = reg_btf_record(reg);
 
-	if (!state->active_lock.ptr) {
+	if (!cur_func(env)->active_locks) {
 		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
 		return -EFAULT;
 	}
@@ -11720,6 +11773,7 @@ static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_o
  */
 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
+	struct bpf_reference_state *s;
 	void *ptr;
 	u32 id;
 
@@ -11736,10 +11790,10 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
 	}
 	id = reg->id;
 
-	if (!env->cur_state->active_lock.ptr)
+	if (!cur_func(env)->active_locks)
 		return -EINVAL;
-	if (env->cur_state->active_lock.ptr != ptr ||
-	    env->cur_state->active_lock.id != id) {
+	s = find_lock_state(env, REF_TYPE_LOCK, id, ptr);
+	if (!s) {
 		verbose(env, "held lock and object are not in the same allocation\n");
 		return -EINVAL;
 	}
@@ -17635,8 +17689,20 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
 		return false;
 
 	for (i = 0; i < old->acquired_refs; i++) {
-		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
+		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
+		    old->refs[i].type != cur->refs[i].type)
 			return false;
+		switch (old->refs[i].type) {
+		case REF_TYPE_PTR:
+			break;
+		case REF_TYPE_LOCK:
+			if (old->refs[i].ptr != cur->refs[i].ptr)
+				return false;
+			break;
+		default:
+			WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type);
+			return false;
+		}
 	}
 
 	return true;
@@ -17714,19 +17780,6 @@ static bool states_equal(struct bpf_verifier_env *env,
 	if (old->speculative && !cur->speculative)
 		return false;
 
-	if (old->active_lock.ptr != cur->active_lock.ptr)
-		return false;
-
-	/* Old and cur active_lock's have to be either both present
-	 * or both absent.
-	 */
-	if (!!old->active_lock.id != !!cur->active_lock.id)
-		return false;
-
-	if (old->active_lock.id &&
-	    !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch))
-		return false;
-
 	if (old->active_rcu_lock != cur->active_rcu_lock)
 		return false;
 
@@ -18625,7 +18678,7 @@ static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
-				if (env->cur_state->active_lock.ptr) {
+				if (cur_func(env)->active_locks) {
 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
 					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
tools/testing/selftests/bpf/prog_tests/cb_refs.c
@@ -11,8 +11,8 @@ struct {
 	const char *prog_name;
 	const char *err_msg;
 } cb_refs_tests[] = {
-	{ "underflow_prog", "reference has not been acquired before" },
-	{ "leak_prog", "Unreleased reference" },
+	{ "underflow_prog", "must point to scalar, or struct with scalar" },
+	{ "leak_prog", "Possibly NULL pointer passed to helper arg2" },
 	{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
 	{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
 };
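For context, the verifier state reworked above tracks the ordinary bpf_spin_lock pattern in BPF programs. A minimal, self-contained sketch of that pattern follows; it is illustrative only, is not part of this series, and the map name, value layout, and section names are made up here.

/* Minimal bpf_spin_lock usage sketch (illustrative only): the verifier must
 * record which map value's lock is held and check that the unlock targets
 * the same (id, ptr) pair that the lock acquired.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val_elem {
	struct bpf_spin_lock lock;
	long counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val_elem);
} counters SEC(".maps");

SEC("tc")
int bump_counter(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val_elem *val = bpf_map_lookup_elem(&counters, &key);

	if (!val)
		return 0;
	bpf_spin_lock(&val->lock);	/* verifier: lock state acquired */
	val->counter++;
	bpf_spin_unlock(&val->lock);	/* verifier: matching state released */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";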