Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
bpf: Do cleanup in bpf_bprintf_cleanup only when needed
commit f19a4050455aad847fb93f18dc1fe502eb60f989 upstream.

Currently we always clean up/decrement the bpf_bprintf_nest_level variable in
bpf_bprintf_cleanup if it's > 0.

There is a possible scenario where this causes a problem: when
bpf_bprintf_prepare does not get a bin_args buffer (because num_args is 0),
the following bpf_bprintf_cleanup call still decrements bpf_bprintf_nest_level,
like:

  in task context:
    bpf_bprintf_prepare(num_args != 0) increments 'bpf_bprintf_nest_level = 1'

    -> first irq:
       bpf_bprintf_prepare(num_args == 0)
       bpf_bprintf_cleanup decrements 'bpf_bprintf_nest_level = 0'

    -> second irq:
       bpf_bprintf_prepare(num_args != 0) sets bpf_bprintf_nest_level = 1
       and gets the same buffer as the task context above

Add a check to bpf_bprintf_cleanup and do the real cleanup only if we got
bin_args data in the first place.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221215214430.1336195-3-jolsa@kernel.org
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f7bbad9561
commit 95b7476f6f
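To make the imbalance easier to see, here is a minimal userspace sketch of the scenario above; it is not the kernel code. The plain nest_level counter stands in for the per-CPU bpf_bprintf_nest_level, and the helper names (prepare, cleanup_old, cleanup_fixed) and the bprintf_data struct are made up for illustration only.

/*
 * Simplified userspace model of the imbalance described above -- not the
 * kernel implementation.  "nest_level" stands in for the per-CPU
 * bpf_bprintf_nest_level counter; "bin_args" marks whether prepare()
 * actually reserved a buffer for this call.
 */
#include <stdbool.h>
#include <stdio.h>

static int nest_level;

struct bprintf_data {
	bool get_bin_args;   /* caller needs a buffer (num_args != 0)    */
	void *bin_args;      /* non-NULL only when a buffer was reserved */
};

static void prepare(struct bprintf_data *data)
{
	if (!data->get_bin_args)
		return;              /* nothing reserved, nothing to undo */
	nest_level++;                /* reserve one nesting slot/buffer   */
	data->bin_args = (void *)1;  /* stand-in for the real buffer      */
}

/* Old behaviour: decrement whenever the counter happens to be non-zero. */
static void cleanup_old(struct bprintf_data *data)
{
	(void)data;
	if (nest_level > 0)
		nest_level--;
}

/* Fixed behaviour: only undo what prepare() actually did for this call. */
static void cleanup_fixed(struct bprintf_data *data)
{
	if (!data->bin_args)
		return;
	nest_level--;
}

int main(void)
{
	struct bprintf_data task = { .get_bin_args = true };
	struct bprintf_data irq1 = { .get_bin_args = false };

	prepare(&task);          /* task context reserves level 1           */
	prepare(&irq1);          /* irq with num_args == 0 reserves nothing */
	cleanup_old(&irq1);      /* ...but still drops the task's level     */
	printf("old cleanup:   nest_level = %d (should be 1)\n", nest_level);

	nest_level = 0;
	task.bin_args = NULL;
	prepare(&task);
	prepare(&irq1);
	cleanup_fixed(&irq1);    /* no bin_args -> leaves the counter alone */
	printf("fixed cleanup: nest_level = %d\n", nest_level);
	return 0;
}

With the old-style cleanup the counter drops back to 0 even though the task context still holds its buffer, so a second nested prepare() would hand out the same buffer again; the fixed cleanup leaves the counter at 1 because the irq-level call never reserved anything.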
include/linux/bpf.h
@@ -2747,7 +2747,7 @@ struct bpf_bprintf_data {
 
 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 			u32 num_args, struct bpf_bprintf_data *data);
-void bpf_bprintf_cleanup(void);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
 
 /* the implementation of the opaque uapi struct bpf_dynptr */
 struct bpf_dynptr_kern {
kernel/bpf/helpers.c
@@ -781,12 +781,14 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
 	return 0;
 }
 
-void bpf_bprintf_cleanup(void)
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
 {
-	if (this_cpu_read(bpf_bprintf_nest_level)) {
-		this_cpu_dec(bpf_bprintf_nest_level);
-		preempt_enable();
-	}
+	if (!data->bin_args)
+		return;
+	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
+		return;
+	this_cpu_dec(bpf_bprintf_nest_level);
+	preempt_enable();
 }
 
 /*
@@ -1018,7 +1020,7 @@ nocopy_fmt:
 	err = 0;
 out:
 	if (err)
-		bpf_bprintf_cleanup();
+		bpf_bprintf_cleanup(data);
 	return err;
 }
 
@@ -1044,7 +1046,7 @@ BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
 
 	err = bstr_printf(str, str_size, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return err + 1;
 }
kernel/trace/bpf_trace.c
@@ -395,7 +395,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 	trace_bpf_trace_printk(buf);
 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -453,7 +453,7 @@ BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 	trace_bpf_trace_printk(buf);
 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -493,7 +493,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 
 	seq_bprintf(m, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 }