BPF fixes:
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Daniel Borkmann:

- Fix an out-of-bounds read in bpf_link_show_fdinfo for BPF sockmap
  link file descriptors (Hou Tao)

- Fix BPF arm64 JIT's address emission with tag-based KASAN enabled
  reserving not enough size (Peter Collingbourne)

- Fix BPF verifier do_misc_fixups patching for inlining of the
  bpf_get_branch_snapshot BPF helper (Andrii Nakryiko)

- Fix a BPF verifier bug and reject BPF program write attempts into
  read-only marked BPF maps (Daniel Borkmann)

- Fix perf_event_detach_bpf_prog error handling by removing an invalid
  check which would skip BPF program release (Jiri Olsa)

- Fix memory leak when parsing mount options for the BPF filesystem
  (Hou Tao)

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Check validity of link->type in bpf_link_show_fdinfo()
  bpf: Add the missing BPF_LINK_TYPE invocation for sockmap
  bpf: fix do_misc_fixups() for bpf_get_branch_snapshot()
  bpf,perf: Fix perf_event_detach_bpf_prog error handling
  selftests/bpf: Add test for passing in uninit mtu_len
  selftests/bpf: Add test for writes to .rodata
  bpf: Remove MEM_UNINIT from skb/xdp MTU helpers
  bpf: Fix overloading of MEM_UNINIT's meaning
  bpf: Add MEM_WRITE attribute
  bpf: Preserve param->string when parsing mount options
  bpf, arm64: Fix address emission with tag-based KASAN enabled
commit ae90f6a617
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
@@ -2220,7 +2220,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
         emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
-                emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
+                /* for the first pass, assume the worst case */
+                if (!ctx->image)
+                        ctx->idx += 4;
+                else
+                        emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
                 emit_call((const u64)__bpf_tramp_enter, ctx);
         }
 
@@ -2264,7 +2268,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 
         if (flags & BPF_TRAMP_F_CALL_ORIG) {
                 im->ip_epilogue = ctx->ro_image + ctx->idx;
-                emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
+                /* for the first pass, assume the worst case */
+                if (!ctx->image)
+                        ctx->idx += 4;
+                else
+                        emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
                 emit_call((const u64)__bpf_tramp_exit, ctx);
         }
 
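Why four instructions are reserved: during the size-calculation pass the trampoline is sized against a stack copy of the bpf_tramp_image, while at code-generation time im is a heap pointer that carries a KASAN tag in its top byte, so emit_a64_mov_i64() can need one more instruction than was budgeted. The following standalone sketch is illustration only, not the kernel's emit_a64_mov_i64() (which has further tricks and may emit even fewer instructions); it just models the MOVZ/MOVN-plus-MOVK counting, and the 0xf2 tag value is made up:

/*
 * Count instructions needed to materialize a 64-bit immediate: one MOVZ/MOVN
 * "seed" that sets one 16-bit chunk and fills the rest with zeros/ones, plus
 * one MOVK for every remaining chunk that still differs from the fill.
 */
#include <stdio.h>

static int mov_imm64_insns(unsigned long long imm)
{
        int zeros = 0, ones = 0, covered, i;

        for (i = 0; i < 4; i++) {
                unsigned int chunk = (imm >> (16 * i)) & 0xffff;

                zeros += (chunk == 0x0000);
                ones  += (chunk == 0xffff);
        }
        covered = zeros > ones ? zeros : ones;  /* chunks matched by the fill pattern */
        return covered == 4 ? 1 : 4 - covered;  /* seed + MOVKs */
}

int main(void)
{
        printf("untagged im: %d insns\n", mov_imm64_insns(0xffff800012345678ULL)); /* 3 */
        printf("tagged im:   %d insns\n", mov_imm64_insns(0xf2ff800012345678ULL)); /* 4 */
        return 0;
}

With a non-all-ones, non-zero top chunk from the pointer tag, the worst case of 4 slots applies, which is exactly what the first pass now reserves.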
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
@@ -635,6 +635,7 @@ enum bpf_type_flag {
          */
         PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
 
+        /* MEM can be uninitialized. */
         MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
 
         /* DYNPTR points to memory local to the bpf program. */
@@ -700,6 +701,13 @@ enum bpf_type_flag {
          */
         MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
 
+        /* MEM is being written to, often combined with MEM_UNINIT. Non-presence
+         * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the
+         * MEM_UNINIT means that memory needs to be initialized since it is also
+         * read.
+         */
+        MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
+
         __BPF_TYPE_FLAG_MAX,
         __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
 };
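As a reading aid for the comment above, here is a hypothetical helper prototype (a fragment only, not a helper from the tree and not compilable on its own) showing how the flags are meant to compose after this series:

/* Hypothetical bpf_func_proto, illustration only:
 *
 *   MEM_UNINIT | MEM_WRITE  -> pure output buffer; may be passed in
 *                              uninitialized, the helper fills all of it
 *   MEM_WRITE alone         -> in/out buffer; written by the helper but
 *                              also read, so it must be initialized
 *   neither flag            -> plain input; only read by the helper
 */
static const struct bpf_func_proto example_fill_buf_proto = {
        .func      = example_fill_buf,                            /* hypothetical helper */
        .ret_type  = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_MEM | MEM_UNINIT | MEM_WRITE,     /* output only */
        .arg2_type = ARG_CONST_SIZE,
};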
@@ -758,10 +766,10 @@ enum bpf_arg_type {
         ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
         ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
         ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
-        /* pointer to memory does not need to be initialized, helper function must fill
-         * all bytes or clear them in error case.
+        /* Pointer to memory does not need to be initialized, since helper function
+         * fills all bytes or clears them in error case.
          */
-        ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
+        ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
         /* Pointer to valid memory of size known at compile time. */
         ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
 
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
@@ -146,6 +146,7 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
 BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
+BPF_LINK_TYPE(BPF_LINK_TYPE_SOCKMAP, sockmap)
 #endif
 #ifdef CONFIG_PERF_EVENTS
 BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
@@ -1121,6 +1121,9 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
+ * in sync with the definitions below.
+ */
 enum bpf_link_type {
         BPF_LINK_TYPE_UNSPEC = 0,
         BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
@@ -111,7 +111,7 @@ const struct bpf_func_proto bpf_map_pop_elem_proto = {
         .gpl_only = false,
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_CONST_MAP_PTR,
-        .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
+        .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
@@ -124,7 +124,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
         .gpl_only = false,
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_CONST_MAP_PTR,
-        .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
+        .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
@@ -538,7 +538,7 @@ const struct bpf_func_proto bpf_strtol_proto = {
         .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
         .arg2_type = ARG_CONST_SIZE,
         .arg3_type = ARG_ANYTHING,
-        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
         .arg4_size = sizeof(s64),
 };
 
@@ -566,7 +566,7 @@ const struct bpf_func_proto bpf_strtoul_proto = {
         .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
         .arg2_type = ARG_CONST_SIZE,
         .arg3_type = ARG_ANYTHING,
-        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
         .arg4_size = sizeof(u64),
 };
 
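For contrast with the read-only arg1, the arg4 result slot of bpf_strtol()/bpf_strtoul() stays a pure output (MEM_UNINIT | MEM_WRITE), so callers may pass an uninitialized slot. A hedged BPF-side sketch of a caller; it assumes flags = 0 selects automatic base detection and borrows the tc/ingress section and TCX return codes used by the selftests in this series:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tc/ingress")
int tc_parse_num(struct __sk_buff *ctx)
{
        const char buf[] = "42";
        long res;       /* may stay uninitialized: output-only argument */

        if (bpf_strtol(buf, sizeof(buf) - 1, 0, &res) < 0)
                return TCX_DROP;
        return res == 42 ? TCX_PASS : TCX_DROP;
}

char LICENSE[] SEC("license") = "GPL";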
@@ -1742,7 +1742,7 @@ static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
         .arg1_type = ARG_PTR_TO_UNINIT_MEM,
         .arg2_type = ARG_CONST_SIZE_OR_ZERO,
         .arg3_type = ARG_ANYTHING,
-        .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
+        .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
@@ -880,7 +880,7 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
                 const struct btf_type *enum_t;
                 const char *enum_pfx;
                 u64 *delegate_msk, msk = 0;
-                char *p;
+                char *p, *str;
                 int val;
 
                 /* ignore errors, fallback to hex */
@@ -911,7 +911,8 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
                         return -EINVAL;
                 }
 
-                while ((p = strsep(&param->string, ":"))) {
+                str = param->string;
+                while ((p = strsep(&str, ":"))) {
                         if (strcmp(p, "any") == 0) {
                                 msk |= ~0ULL;
                         } else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) {
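The local str cursor is the whole fix: strsep() advances (and finally NULLs) the pointer it is handed, so looping on &param->string directly left param->string NULL and the buffer the VFS had allocated for the option value was never freed. A standalone illustration in plain userspace C (not kernel code) of why the original pointer must be preserved:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *opts = strdup("maps:progs:attachs");
        char *str = opts;       /* walk a copy of the cursor ... */
        char *p;

        while ((p = strsep(&str, ":")))
                printf("token: %s\n", p);

        free(opts);             /* ... so the original allocation can still be freed */
        return 0;
}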
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
@@ -632,7 +632,7 @@ const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
         .arg1_type = ARG_CONST_MAP_PTR,
         .arg2_type = ARG_ANYTHING,
         .arg3_type = ARG_ANYTHING,
-        .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
+        .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
@@ -3069,13 +3069,17 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
 {
         const struct bpf_link *link = filp->private_data;
         const struct bpf_prog *prog = link->prog;
+        enum bpf_link_type type = link->type;
         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 
-        seq_printf(m,
-                   "link_type:\t%s\n"
-                   "link_id:\t%u\n",
-                   bpf_link_type_strs[link->type],
-                   link->id);
+        if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
+                seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
+        } else {
+                WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
+                seq_printf(m, "link_type:\t<%u>\n", type);
+        }
+        seq_printf(m, "link_id:\t%u\n", link->id);
+
         if (prog) {
                 bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
                 seq_printf(m,
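The strings built here surface in procfs; with the fix, a link type that lacks a BPF_LINK_TYPE() entry renders as "link_type:\t<N>" plus a one-time WARN instead of indexing past bpf_link_type_strs[]. A minimal userspace sketch (not from the tree) that dumps the fdinfo text, assuming link_fd is a BPF link file descriptor the process already holds, e.g. from bpf_link_create():

#include <stdio.h>

static void dump_link_fdinfo(int link_fd)
{
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", link_fd);
        f = fopen(path, "r");
        if (!f) {
                perror("fopen");
                return;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* link_type:, link_id:, prog_tag:, prog_id:, ... */
        fclose(f);
}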
@@ -5892,7 +5896,7 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
         .arg1_type = ARG_PTR_TO_MEM,
         .arg2_type = ARG_CONST_SIZE_OR_ZERO,
         .arg3_type = ARG_ANYTHING,
-        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
         .arg4_size = sizeof(u64),
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -7438,7 +7438,8 @@ static int check_stack_range_initialized(
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
-                                   int access_size, bool zero_size_allowed,
+                                   int access_size, enum bpf_access_type access_type,
+                                   bool zero_size_allowed,
                                    struct bpf_call_arg_meta *meta)
 {
         struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
@@ -7450,7 +7451,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                 return check_packet_access(env, regno, reg->off, access_size,
                                            zero_size_allowed);
         case PTR_TO_MAP_KEY:
-                if (meta && meta->raw_mode) {
+                if (access_type == BPF_WRITE) {
                         verbose(env, "R%d cannot write into %s\n", regno,
                                 reg_type_str(env, reg->type));
                         return -EACCES;
@@ -7458,15 +7459,13 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                 return check_mem_region_access(env, regno, reg->off, access_size,
                                                reg->map_ptr->key_size, false);
         case PTR_TO_MAP_VALUE:
-                if (check_map_access_type(env, regno, reg->off, access_size,
-                                          meta && meta->raw_mode ? BPF_WRITE :
-                                          BPF_READ))
+                if (check_map_access_type(env, regno, reg->off, access_size, access_type))
                         return -EACCES;
                 return check_map_access(env, regno, reg->off, access_size,
                                         zero_size_allowed, ACCESS_HELPER);
         case PTR_TO_MEM:
                 if (type_is_rdonly_mem(reg->type)) {
-                        if (meta && meta->raw_mode) {
+                        if (access_type == BPF_WRITE) {
                                 verbose(env, "R%d cannot write into %s\n", regno,
                                         reg_type_str(env, reg->type));
                                 return -EACCES;
@@ -7477,7 +7476,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                                         zero_size_allowed);
         case PTR_TO_BUF:
                 if (type_is_rdonly_mem(reg->type)) {
-                        if (meta && meta->raw_mode) {
+                        if (access_type == BPF_WRITE) {
                                 verbose(env, "R%d cannot write into %s\n", regno,
                                         reg_type_str(env, reg->type));
                                 return -EACCES;
@@ -7505,7 +7504,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                  * Dynamically check it now.
                  */
                 if (!env->ops->convert_ctx_access) {
-                        enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
                         int offset = access_size - 1;
 
                         /* Allow zero-byte read from PTR_TO_CTX */
@@ -7513,7 +7511,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
                                 return zero_size_allowed ? 0 : -EACCES;
 
                         return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
-                                                atype, -1, false, false);
+                                                access_type, -1, false, false);
                 }
 
                 fallthrough;
@@ -7538,6 +7536,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
  */
 static int check_mem_size_reg(struct bpf_verifier_env *env,
                               struct bpf_reg_state *reg, u32 regno,
+                              enum bpf_access_type access_type,
                               bool zero_size_allowed,
                               struct bpf_call_arg_meta *meta)
 {
@@ -7553,15 +7552,12 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
          */
         meta->msize_max_value = reg->umax_value;
 
-        /* The register is SCALAR_VALUE; the access check
-         * happens using its boundaries.
+        /* The register is SCALAR_VALUE; the access check happens using
+         * its boundaries. For unprivileged variable accesses, disable
+         * raw mode so that the program is required to initialize all
+         * the memory that the helper could just partially fill up.
          */
         if (!tnum_is_const(reg->var_off))
-                /* For unprivileged variable accesses, disable raw
-                 * mode so that the program is required to
-                 * initialize all the memory that the helper could
-                 * just partially fill up.
-                 */
                 meta = NULL;
 
         if (reg->smin_value < 0) {
@@ -7581,9 +7577,8 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
                         regno);
                 return -EACCES;
         }
-        err = check_helper_mem_access(env, regno - 1,
-                                      reg->umax_value,
-                                      zero_size_allowed, meta);
+        err = check_helper_mem_access(env, regno - 1, reg->umax_value,
+                                      access_type, zero_size_allowed, meta);
         if (!err)
                 err = mark_chain_precision(env, regno);
         return err;
@@ -7594,13 +7589,11 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
 {
         bool may_be_null = type_may_be_null(reg->type);
         struct bpf_reg_state saved_reg;
-        struct bpf_call_arg_meta meta;
         int err;
 
         if (register_is_null(reg))
                 return 0;
 
-        memset(&meta, 0, sizeof(meta));
         /* Assuming that the register contains a value check if the memory
          * access is safe. Temporarily save and restore the register's state as
          * the conversion shouldn't be visible to a caller.
@@ -7610,10 +7603,8 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
                 mark_ptr_not_null_reg(reg);
         }
 
-        err = check_helper_mem_access(env, regno, mem_size, true, &meta);
-        /* Check access for BPF_WRITE */
-        meta.raw_mode = true;
-        err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
+        err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
+        err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);
 
         if (may_be_null)
                 *reg = saved_reg;
@@ -7639,13 +7630,12 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
                 mark_ptr_not_null_reg(mem_reg);
         }
 
-        err = check_mem_size_reg(env, reg, regno, true, &meta);
-        /* Check access for BPF_WRITE */
-        meta.raw_mode = true;
-        err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
+        err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta);
+        err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta);
 
         if (may_be_null)
                 *mem_reg = saved_reg;
 
         return err;
 }
@@ -8948,9 +8938,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                         verbose(env, "invalid map_ptr to access map->key\n");
                         return -EACCES;
                 }
-                err = check_helper_mem_access(env, regno,
-                                              meta->map_ptr->key_size, false,
-                                              NULL);
+                err = check_helper_mem_access(env, regno, meta->map_ptr->key_size,
+                                              BPF_READ, false, NULL);
                 break;
         case ARG_PTR_TO_MAP_VALUE:
                 if (type_may_be_null(arg_type) && register_is_null(reg))
@@ -8965,9 +8954,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                         return -EACCES;
                 }
                 meta->raw_mode = arg_type & MEM_UNINIT;
-                err = check_helper_mem_access(env, regno,
-                                              meta->map_ptr->value_size, false,
-                                              meta);
+                err = check_helper_mem_access(env, regno, meta->map_ptr->value_size,
+                                              arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
+                                              false, meta);
                 break;
         case ARG_PTR_TO_PERCPU_BTF_ID:
                 if (!reg->btf_id) {
@@ -9009,7 +8998,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                  */
                 meta->raw_mode = arg_type & MEM_UNINIT;
                 if (arg_type & MEM_FIXED_SIZE) {
-                        err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
+                        err = check_helper_mem_access(env, regno, fn->arg_size[arg],
+                                                      arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
+                                                      false, meta);
                         if (err)
                                 return err;
                         if (arg_type & MEM_ALIGNED)
@@ -9017,10 +9008,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                 }
                 break;
         case ARG_CONST_SIZE:
-                err = check_mem_size_reg(env, reg, regno, false, meta);
+                err = check_mem_size_reg(env, reg, regno,
+                                         fn->arg_type[arg - 1] & MEM_WRITE ?
+                                         BPF_WRITE : BPF_READ,
+                                         false, meta);
                 break;
         case ARG_CONST_SIZE_OR_ZERO:
-                err = check_mem_size_reg(env, reg, regno, true, meta);
+                err = check_mem_size_reg(env, reg, regno,
+                                         fn->arg_type[arg - 1] & MEM_WRITE ?
+                                         BPF_WRITE : BPF_READ,
+                                         true, meta);
                 break;
         case ARG_PTR_TO_DYNPTR:
                 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
@@ -21213,7 +21210,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         delta    += cnt - 1;
                         env->prog = prog = new_prog;
                         insn      = new_prog->insnsi + i + delta;
-                        continue;
+                        goto next_insn;
                 }
 
                 /* Implement bpf_kptr_xchg inline */
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
@@ -1202,7 +1202,7 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_PTR_TO_CTX,
         .arg2_type = ARG_ANYTHING,
-        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
         .arg3_size = sizeof(u64),
 };
 
@@ -1219,7 +1219,7 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
         .func = get_func_ret,
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_PTR_TO_CTX,
-        .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
         .arg2_size = sizeof(u64),
 };
 
@@ -2216,8 +2216,6 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 
         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
         ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
-        if (ret == -ENOENT)
-                goto unlock;
         if (ret < 0) {
                 bpf_prog_array_delete_safe(old_array, event->prog);
         } else {
diff --git a/net/core/filter.c b/net/core/filter.c
@@ -6281,24 +6281,16 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
 {
         int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
         struct net_device *dev = skb->dev;
-        int skb_len, dev_len;
-        int mtu = 0;
+        int mtu, dev_len, skb_len;
 
-        if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
-                ret = -EINVAL;
-                goto out;
-        }
-
-        if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
-                ret = -EINVAL;
-                goto out;
-        }
+        if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
+                return -EINVAL;
+        if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
+                return -EINVAL;
 
         dev = __dev_via_ifindex(dev, ifindex);
-        if (unlikely(!dev)) {
-                ret = -ENODEV;
-                goto out;
-        }
+        if (unlikely(!dev))
+                return -ENODEV;
 
         mtu = READ_ONCE(dev->mtu);
         dev_len = mtu + dev->hard_header_len;
@@ -6333,19 +6325,15 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
         struct net_device *dev = xdp->rxq->dev;
         int xdp_len = xdp->data_end - xdp->data;
         int ret = BPF_MTU_CHK_RET_SUCCESS;
-        int mtu = 0, dev_len;
+        int mtu, dev_len;
 
         /* XDP variant doesn't support multi-buffer segment check (yet) */
-        if (unlikely(flags)) {
-                ret = -EINVAL;
-                goto out;
-        }
+        if (unlikely(flags))
+                return -EINVAL;
 
         dev = __dev_via_ifindex(dev, ifindex);
-        if (unlikely(!dev)) {
-                ret = -ENODEV;
-                goto out;
-        }
+        if (unlikely(!dev))
+                return -ENODEV;
 
         mtu = READ_ONCE(dev->mtu);
         dev_len = mtu + dev->hard_header_len;
@@ -6357,7 +6345,7 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
         xdp_len += len_diff; /* minus result pass check */
         if (xdp_len > dev_len)
                 ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
-out:
+
         *mtu_len = mtu;
         return ret;
 }
@@ -6368,7 +6356,7 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_PTR_TO_CTX,
         .arg2_type = ARG_ANYTHING,
-        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
         .arg3_size = sizeof(u32),
         .arg4_type = ARG_ANYTHING,
         .arg5_type = ARG_ANYTHING,
@@ -6380,7 +6368,7 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
         .ret_type = RET_INTEGER,
         .arg1_type = ARG_PTR_TO_CTX,
         .arg2_type = ARG_ANYTHING,
-        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+        .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
         .arg3_size = sizeof(u32),
         .arg4_type = ARG_ANYTHING,
         .arg5_type = ARG_ANYTHING,
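Because *mtu_len is read by the helper (the BPF_MTU_CHK_SEGS sanity check dereferences it) as well as written, dropping MEM_UNINIT means callers must now pass an initialized value. A hedged sketch of a program that stays verifier-clean after this change; the section name and TCX_PASS return convention are borrowed from the selftests in this series:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tc/ingress")
int tc_check_mtu(struct __sk_buff *ctx)
{
        __u32 mtu_len = 0;      /* must be initialized now that MEM_UNINIT is gone */

        bpf_check_mtu(ctx, 0, &mtu_len, 0, 0);
        return TCX_PASS;
}

char LICENSE[] SEC("license") = "GPL";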
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
@@ -1121,6 +1121,9 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
+ * in sync with the definitions below.
+ */
 enum bpf_link_type {
         BPF_LINK_TYPE_UNSPEC = 0,
         BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -54,6 +54,7 @@
 #include "verifier_masking.skel.h"
 #include "verifier_meta_access.skel.h"
 #include "verifier_movsx.skel.h"
+#include "verifier_mtu.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
 #include "verifier_bpf_fastcall.skel.h"
@@ -223,6 +224,24 @@ void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_pack
 void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
 void test_verifier_lsm(void) { RUN(verifier_lsm); }
 
+void test_verifier_mtu(void)
+{
+        __u64 caps = 0;
+        int ret;
+
+        /* In case CAP_BPF and CAP_PERFMON is not set */
+        ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
+        if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
+                return;
+        ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
+        if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
+                goto restore_cap;
+        RUN(verifier_mtu);
+restore_cap:
+        if (caps)
+                cap_enable_effective(caps, NULL);
+}
+
 static int init_test_val_map(struct bpf_object *obj, char *map_name)
 {
         struct test_val value = {
diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2024 Isovalent */
 
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
 #include "bpf_misc.h"
 
 const volatile long foo = 42;
@@ -66,4 +67,32 @@ int tcx6(struct __sk_buff *skb)
         return TCX_PASS;
 }
 
+static inline void write_fixed(volatile void *p, __u32 val)
+{
+        *(volatile __u32 *)p = val;
+}
+
+static inline void write_dyn(void *p, void *val, int len)
+{
+        bpf_copy_from_user(p, len, val);
+}
+
+SEC("tc/ingress")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int tcx7(struct __sk_buff *skb)
+{
+        write_fixed((void *)&foo, skb->mark);
+        return TCX_PASS;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int BPF_PROG(bprm, struct linux_binprm *bprm)
+{
+        write_dyn((void *)&foo, &bart, bpf_get_prandom_u32() & 3);
+        return 0;
+}
+
 char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c
new file mode 100644
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tc/ingress")
+__description("uninit/mtu: write rejected")
+__failure __msg("invalid indirect read from stack")
+int tc_uninit_mtu(struct __sk_buff *ctx)
+{
+        __u32 mtu;
+
+        bpf_check_mtu(ctx, 0, &mtu, 0, 0);
+        return TCX_PASS;
+}
+
+char LICENSE[] SEC("license") = "GPL";