bpf: support bpf_fastcall patterns for kfuncs
Recognize bpf_fastcall patterns around kfunc calls. For example, suppose
bpf_cast_to_kern_ctx() follows the bpf_fastcall contract (which it does).
In such a case, allow the verifier to rewrite the BPF program below:

  r2 = 1;
  *(u64 *)(r10 - 32) = r2;
  call %[bpf_cast_to_kern_ctx];
  r2 = *(u64 *)(r10 - 32);
  r0 = r2;

by removing the spill/fill pair:

  r2 = 1;
  call %[bpf_cast_to_kern_ctx];
  r0 = r2;

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240822084112.3257995-4-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
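As a rough illustration of the rule this patch adds for kfuncs, the standalone userspace C sketch below (not kernel code; the helper name and register enum are invented for the example) computes the clobber mask the same way: r0 counts as clobbered when the kfunc returns a value, and r1..rN for its N arguments. For bpf_cast_to_kern_ctx(), which takes one argument and returns a pointer, only r0 and r1 end up in the mask, which is why the r2 spill/fill pair in the example above is removable.

  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n) (1u << (n))

  /* Caller-saved BPF register numbers, named here only for the example. */
  enum { R0, R1, R2, R3, R4, R5 };

  /* Same idea as the patch's mask rule: a kfunc is assumed to clobber
   * R0 when it returns a value and R1..Rn for its n arguments.
   */
  static uint32_t fastcall_clobber_mask(unsigned int nr_args, int returns_value)
  {
      uint32_t mask = 0;
      unsigned int i;

      if (returns_value)
          mask |= BIT(R0);
      for (i = 0; i < nr_args; i++)
          mask |= BIT(R1 + i);
      return mask;
  }

  int main(void)
  {
      /* bpf_cast_to_kern_ctx(): one argument, non-void return =>
       * only r0 and r1 are clobbered; r2..r5 survive the call.
       */
      printf("clobber mask: %#x\n", (unsigned)fastcall_clobber_mask(1, 1));
      return 0;
  }

Registers outside the computed mask are exactly the ones whose spill/fill pairs around the call the verifier may drop.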
This commit is contained in:
parent adec67d372
commit b2ee6d27e9
@@ -16125,7 +16125,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
  */
 static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn)
 {
-	u8 mask;
+	u32 mask;
 	int i;
 
 	mask = 0;
@@ -16153,6 +16153,26 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
 	}
 }
 
+/* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */
+static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta)
+{
+	u32 vlen, i, mask;
+
+	vlen = btf_type_vlen(meta->func_proto);
+	mask = 0;
+	if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type)))
+		mask |= BIT(BPF_REG_0);
+	for (i = 0; i < vlen; ++i)
+		mask |= BIT(BPF_REG_1 + i);
+	return mask;
+}
+
+/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
+static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta)
+{
+	return false;
+}
+
 /* LLVM define a bpf_fastcall function attribute.
  * This attribute means that function scratches only some of
  * the caller saved registers defined by ABI.
@@ -16250,6 +16270,19 @@ static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
 				  bpf_jit_inlines_helper_call(call->imm));
 	}
 
+	if (bpf_pseudo_kfunc_call(call)) {
+		struct bpf_kfunc_call_arg_meta meta;
+		int err;
+
+		err = fetch_kfunc_meta(env, call, &meta, NULL);
+		if (err < 0)
+			/* error would be reported later */
+			return;
+
+		clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta);
+		can_be_inlined = is_fastcall_kfunc_call(&meta);
+	}
+
 	if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
 		return;
 