Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
for-netdev

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmSvKGIACgkQ6rmadz2v
bTorMBAAl25eNvQbLlNGRK4o8BE3ykND/tQT285rrgpvzBd6+7okGeEGKIfj5OCv
1QxNwN6bfn6dvwKCloxJKt5Us43WyxgbuwCXXC34trdpUb2AsLcBDVImUIafWdIE
ZAks8ty84war9fhRGmpB79ZmFFscQ51L+jzdAwS0pcSQXImWuEgFsHR2iwGV3wtW
s3zdvxmkEE4HFKeKkjFKACkQz45BCMjW+L1Lu6bEyV/8CKRRx0F4RnUEoeUzEN5m
dB9GncDW12FRsAC0U8vkmutoHeAUCKfCKW/mvw27xadzZ3gSfdg5HXsQ6oo+oGgZ
+nOPbUzqP5NH/D8X7ZZAN5lQmx55edxPbFcm9x9dFbOWB/qlef/mt2yWUPmnpt9c
8TOlf0Wt9SxeUABORoXdXvXBoNvje7jBfjKgIj/ewaKqjeOlM9JwxW/+m4CHbpa1
5wfu4Gm0zsIRymalKbUzOlCeQPZYB4Zp6MlhddkzfxNh449/mXQWKkKa7sFhCg3X
MQ38CSXtkWZ48PUcWuRPOQu1fyJixYqkt821xbfbDRf7GfLfWFXdVv0xrPIbfLti
jT2GEhA8MTU/PvN2uQACWDNnUPbkC+ijVuRu3OPwrLx7F8lkf+6uMIRke62ToxjW
uL9xWc7Y4jYdq+wzRl5/5OyYIY15ZHLMB2ujCprl7BPwOo8wrBw=
=Mt88
-----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2023-07-12

We've added 5 non-merge commits during the last 7 day(s) which contain
a total of 7 files changed, 93 insertions(+), 28 deletions(-).

The main changes are:

1) Fix max stack depth check for async callbacks, from Kumar.

2) Fix inconsistent JIT image generation, from Björn.

3) Use trusted arguments in XDP hints kfuncs, from Larysa.

4) Fix memory leak in cpu_map_update_elem, from Pu.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xdp: use trusted arguments in XDP hints kfuncs
  bpf: cpumap: Fix memory leak in cpu_map_update_elem
  riscv, bpf: Fix inconsistent JIT image generation
  selftests/bpf: Add selftest for check_stack_max_depth bug
  bpf: Fix max stack depth check for async callbacks
====================

Link: https://lore.kernel.org/r/20230712223045.40182-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b0b0ab6f01
arch/riscv/net/bpf_jit.h

@@ -69,7 +69,7 @@ struct rv_jit_context {
 	struct bpf_prog *prog;
 	u16 *insns;		/* RV insns */
 	int ninsns;
-	int body_len;
+	int prologue_len;
 	int epilogue_offset;
 	int *offset;		/* BPF to RV */
 	int nexentries;
@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
 	int from, to;

 	off++; /* BPF branch is from PC+1, RV is from PC */
-	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
-	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+	from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
+	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
 	return ninsns_rvoff(to - from);
 }

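Note on the rv_offset() change above: after this series, ctx->offset[i] records where BPF instruction i ends, counted from the start of the JIT image including the prologue. A branch whose source is instruction 0 therefore originates at the end of the prologue rather than at offset 0, which is exactly what the new ctx->prologue_len fallback expresses. The result is converted to a byte offset by ninsns_rvoff(); a sketch of that helper as defined in bpf_jit.h (RISC-V instruction counts are in 16-bit RVC units, so bytes = count << 1):

/* Sketch of ninsns_rvoff() from arch/riscv/net/bpf_jit.h: ninsns is
 * an instruction count in 16-bit (compressed) units, so the byte
 * offset is the count shifted left by one.
 */
static inline int ninsns_rvoff(int ninsns)
{
	return ninsns << 1;
}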
arch/riscv/net/bpf_jit_core.c

@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
 	struct bpf_prog *tmp, *orig_prog = prog;
-	int pass = 0, prev_ninsns = 0, prologue_len, i;
+	int pass = 0, prev_ninsns = 0, i;
 	struct rv_jit_data *jit_data;
 	struct rv_jit_context *ctx;

@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 		goto out_offset;
 	}
+
+	if (build_body(ctx, extra_pass, NULL)) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+
 	for (i = 0; i < prog->len; i++) {
 		prev_ninsns += 32;
 		ctx->offset[i] = prev_ninsns;
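The new dry run of build_body() with a NULL offset table validates the program once before any offsets exist; the loop above then seeds ctx->offset[] with a pessimistic 32 RV instructions per BPF instruction. Starting from an over-estimate matters because the convergence loop below shrinks offsets toward a fixed point, and an under-estimate could select branch encodings that stop fitting once real distances are known. A simplified sketch of the seeding, with names as in bpf_jit_core.c:

/* Simplified sketch: assume a worst case of 32 RV instructions per
 * BPF instruction, so the first measuring pass sees safe upper bounds
 * on every branch distance. ctx->offset[i] is cumulative: it marks
 * where instruction i ends.
 */
prev_ninsns = 0;
for (i = 0; i < prog->len; i++) {
	prev_ninsns += 32;
	ctx->offset[i] = prev_ninsns;
}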
@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
 		pass++;
 		ctx->ninsns = 0;
+
+		bpf_jit_build_prologue(ctx);
+		ctx->prologue_len = ctx->ninsns;
+
 		if (build_body(ctx, extra_pass, ctx->offset)) {
 			prog = orig_prog;
 			goto out_offset;
 		}
-		ctx->body_len = ctx->ninsns;
-		bpf_jit_build_prologue(ctx);
+
 		ctx->epilogue_offset = ctx->ninsns;
 		bpf_jit_build_epilogue(ctx);

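This reordering is the core of the inconsistency fix: the prologue is now emitted and measured before the body, so every offset recorded by build_body() is already prologue-inclusive and the separately tracked body_len can go away. A simplified per-pass sketch (error handling trimmed, not the verbatim kernel code):

/* Simplified sketch of one JIT pass after this change: */
ctx->ninsns = 0;

bpf_jit_build_prologue(ctx);        /* emit and count the prologue    */
ctx->prologue_len = ctx->ninsns;    /* body offsets start here        */

build_body(ctx, extra_pass, ctx->offset); /* offsets include prologue */

ctx->epilogue_offset = ctx->ninsns; /* epilogue begins after the body */
bpf_jit_build_epilogue(ctx);

This is also why the fixup in the next hunk no longer reconstructs a prologue length from epilogue_offset - body_len: ctx->offset[i] already contains it, leaving only the conversion from instruction counts to byte offsets.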
@@ -162,10 +171,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

 	if (!prog->is_func || extra_pass) {
 		bpf_jit_binary_lock_ro(jit_data->header);
-		prologue_len = ctx->epilogue_offset - ctx->body_len;
 		for (i = 0; i < prog->len; i++)
-			ctx->offset[i] = ninsns_rvoff(prologue_len +
-						      ctx->offset[i]);
+			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
 		bpf_prog_fill_jited_linfo(prog, ctx->offset);
 out_offset:
 		kfree(ctx->offset);
kernel/bpf/cpumap.c

@@ -122,22 +122,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	atomic_inc(&rcpu->refcnt);
 }

-/* called from workqueue, to workaround syscall using preempt_disable */
-static void cpu_map_kthread_stop(struct work_struct *work)
-{
-	struct bpf_cpu_map_entry *rcpu;
-
-	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
-
-	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
-	 * as it waits until all in-flight call_rcu() callbacks complete.
-	 */
-	rcu_barrier();
-
-	/* kthread_stop will wake_up_process and wait for it to complete */
-	kthread_stop(rcpu->kthread);
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
 	/* The tear-down procedure should have made sure that queue is
@@ -165,6 +149,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	}
 }

+/* called from workqueue, to workaround syscall using preempt_disable */
+static void cpu_map_kthread_stop(struct work_struct *work)
+{
+	struct bpf_cpu_map_entry *rcpu;
+	int err;
+
+	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+
+	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+	 * as it waits until all in-flight call_rcu() callbacks complete.
+	 */
+	rcu_barrier();
+
+	/* kthread_stop will wake_up_process and wait for it to complete */
+	err = kthread_stop(rcpu->kthread);
+	if (err) {
+		/* kthread_stop may be called before cpu_map_kthread_run
+		 * is executed, so we need to release the memory related
+		 * to rcpu.
+		 */
+		put_cpu_map_entry(rcpu);
+	}
+}
+
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
 				     struct list_head *listp,
 				     struct xdp_cpumap_stats *stats)
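The leak closed by this hunk is a reference-count leak: the cpumap kthread holds a reference to its bpf_cpu_map_entry and drops it when the kthread function finishes, but if kthread_stop() runs before the kthread was ever scheduled, kthread_stop() returns an error (-EINTR) and that reference is never dropped. For context, a sketch of the counterpart helper put_cpu_map_entry() as it appears in kernel/bpf/cpumap.c around this change; dropping the last reference tears the entry down:

/* Sketch: dropping the last reference frees the entry. */
static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}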
kernel/bpf/verifier.c

@@ -5642,8 +5642,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
 				return -EFAULT;
 			}
-			 /* async callbacks don't increase bpf prog stack size */
-			continue;
+			/* async callbacks don't increase bpf prog stack size unless called directly */
+			if (!bpf_pseudo_call(insn + i))
+				continue;
 		}
 		i = next_insn;
 		idx = sidx;
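The distinction drawn here: an async callback (for example a bpf_timer callback) runs on its own stack, so merely registering it must not count against the caller's stack budget; but the same function can also be called directly, and a direct call does consume the caller's stack. bpf_pseudo_call() is the verifier's test for a direct BPF-to-BPF call; a sketch matching its definition elsewhere in verifier.c:

/* Sketch: a pseudo call is a BPF_CALL instruction whose src_reg marks
 * it as a call to another BPF subprogram rather than to a helper.
 */
static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}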
net/core/xdp.c

@@ -741,7 +741,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
 __diag_pop();

 BTF_SET8_START(xdp_metadata_kfunc_ids)
-#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
+#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
 XDP_METADATA_KFUNC_xxx
 #undef XDP_METADATA_KFUNC
 BTF_SET8_END(xdp_metadata_kfunc_ids)
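KF_TRUSTED_ARGS makes the verifier require that every pointer argument to these kfuncs is a valid, unmodified (trusted) pointer, rejecting calls that pass an offset or otherwise untrusted xdp_md pointer to the metadata-hints kfuncs. Because XDP_METADATA_KFUNC_xxx is an X-macro list, redefining XDP_METADATA_KFUNC applies the flag to every entry at once. A minimal standalone demonstration of that pattern (hypothetical list names, not the kernel's):

#include <stdio.h>

/* One list macro, many expansions: changing the per-entry macro once
 * changes every generated line.
 */
#define KFUNC_LIST(X)  \
	X(rx_timestamp) \
	X(rx_hash)

#define PRINT_FLAGGED(name) printf(#name ": KF_TRUSTED_ARGS\n");

int main(void)
{
	KFUNC_LIST(PRINT_FLAGGED)
	return 0;
}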
tools/testing/selftests/bpf/prog_tests/async_stack_depth.c (new file, 9 lines)

@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "async_stack_depth.skel.h"
+
+void test_async_stack_depth(void)
+{
+	RUN_TESTS(async_stack_depth);
+}
tools/testing/selftests/bpf/progs/async_stack_depth.c (new file, 40 lines)

@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct hmap_elem {
+	struct bpf_timer timer;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 64);
+	__type(key, int);
+	__type(value, struct hmap_elem);
+} hmap SEC(".maps");
+
+__attribute__((noinline))
+static int timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+	volatile char buf[256] = {};
+	return buf[69];
+}
+
+SEC("tc")
+__failure __msg("combined stack size of 2 calls")
+int prog(struct __sk_buff *ctx)
+{
+	struct hmap_elem *elem;
+	volatile char buf[256] = {};
+
+	elem = bpf_map_lookup_elem(&hmap, &(int){0});
+	if (!elem)
+		return 0;
+
+	timer_cb(NULL, NULL, NULL);
+	return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
+}
+
+char _license[] SEC("license") = "GPL";
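The selftest mirrors the verifier fix exactly: prog() keeps 256 bytes of stack live and also calls timer_cb(), which needs another 256 bytes, so the combined depth of the two-frame call chain exceeds the 512-byte BPF stack limit and must be rejected. Before the fix, the direct call escaped accounting because timer_cb is also registered as an async callback via bpf_timer_set_callback(). The accounting itself, roughly (a simplified sketch of the relevant lines in check_max_stack_depth()):

/* Simplified sketch: each frame's stack usage is rounded up to 32
 * bytes and summed along the call chain; exceeding MAX_BPF_STACK
 * (512) fails verification with "combined stack size of N calls ...".
 */
depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
if (depth > MAX_BPF_STACK) {
	verbose(env, "combined stack size of %d calls is %d. Too large\n",
		frame + 1, depth);
	return -EACCES;
}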