bpf: inline bpf_get_smp_processor_id() helper
If the BPF JIT supports the per-CPU MOV instruction, inline bpf_get_smp_processor_id() to eliminate unnecessary function calls.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/r/20240402021307.1012571-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7bdbf74463
commit 1ae6921009
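For illustration, a minimal sketch of a libbpf-style program whose helper call is affected (the section name and program body are assumptions, not part of the patch): on x86-64, when the JIT supports per-CPU instructions, the verifier now rewrites the bpf_get_smp_processor_id() call below into three inline instructions instead of emitting a helper call.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch: any BPF program calling bpf_get_smp_processor_id()
 * gets the inlined sequence when the JIT supports per-CPU instructions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/sched/sched_switch")
int count_switches(void *ctx)
{
        /* Previously a real helper call; now patched by the verifier into
         * a per-CPU address computation plus a 32-bit load on x86-64.
         */
        __u32 cpu = bpf_get_smp_processor_id();

        bpf_printk("sched_switch on CPU %u", cpu);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";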
@@ -20074,6 +20074,30 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			goto next_insn;
 		}
 
+#ifdef CONFIG_X86_64
+		/* Implement bpf_get_smp_processor_id() inline. */
+		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
+		    prog->jit_requested && bpf_jit_supports_percpu_insn()) {
+			/* BPF_FUNC_get_smp_processor_id inlining is an
+			 * optimization, so if pcpu_hot.cpu_number is ever
+			 * changed in some incompatible and hard to support
+			 * way, it's fine to back out this inlining logic
+			 */
+			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
+			cnt = 3;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			goto next_insn;
+		}
+#endif
 		/* Implement bpf_get_func_arg inline. */
 		if (prog_type == BPF_PROG_TYPE_TRACING &&
 		    insn->imm == BPF_FUNC_get_func_arg) {
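In rough kernel-C terms, the three patched instructions load the absolute per-CPU offset of pcpu_hot.cpu_number, turn it into this CPU's address using the per-CPU MOV instruction introduced in the parent commit, and read the 32-bit CPU number. A sketch of the equivalent computation (for readability only, not code from the patch):

	/* Equivalent of the inlined sequence on x86-64 (sketch):
	 *   insn_buf[0]: r0 = (u32)(unsigned long)&pcpu_hot.cpu_number   -- per-CPU offset
	 *   insn_buf[1]: r0 = this CPU's address for that offset          -- BPF_MOV64_PERCPU_REG
	 *   insn_buf[2]: r0 = *(u32 *)r0                                  -- the CPU number
	 */
	u32 cpu = *this_cpu_ptr(&pcpu_hot.cpu_number);	/* same value as smp_processor_id() */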