BPF fixes:
- Fix BPF verifier to not affect subreg_def marks in its range propagation (Eduard Zingerman)
- Fix a truncation bug in the BPF verifier's handling of coerce_reg_to_size_sx (Dimitar Kanaliev)
- Fix the BPF verifier's delta propagation between linked registers under 32-bit addition (Daniel Borkmann)
- Fix a NULL pointer dereference in BPF devmap due to missing rxq information (Florian Kauer)
- Fix a memory leak in bpf_core_apply (Jiri Olsa)
- Fix a UBSAN-reported array-index-out-of-bounds in BTF parsing for arrays of nested structs (Hou Tao)
- Fix build ID fetching where memory areas backing the file were created with memfd_secret (Andrii Nakryiko)
- Fix BPF task iterator tid filtering which was incorrectly using pid instead of tid (Jordan Rome)
- Several fixes for BPF sockmap and BPF sockhash redirection in combination with vsocks (Michal Luczaj)
- Fix riscv BPF JIT and make BPF_CMPXCHG fully ordered (Andrea Parri)
- Fix riscv BPF JIT under CONFIG_CFI_CLANG to prevent the possibility of an infinite BPF tailcall (Pu Lehui)
- Fix a build warning from resolve_btfids that bpf_lsm_key_free cannot be resolved (Thomas Weißschuh)
- Fix a bug in kfunc BTF caching for modules where the wrong BTF object was returned (Toke Høiland-Jørgensen)
- Fix a BPF selftest compilation error in cgroup-related tests with musl libc (Tony Ambardar)
- Several fixes to BPF link info dumps to fill missing fields (Tyrone Wu)
- Add BPF selftests for kfuncs from multiple modules, checking that the correct kfuncs are called (Simon Sundberg)
- Ensure that internal and user-facing bpf_redirect flags don't overlap (Toke Høiland-Jørgensen)
- Switch to kvzmalloc to allocate the BPF verifier environment (Rik van Riel)
- Use raw_spinlock_t in BPF ringbuf to fix a sleep-in-atomic splat under RT (Wander Lairson Costa)

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (38 commits)
  lib/buildid: Handle memfd_secret() files in build_id_parse()
  selftests/bpf: Add test case for delta propagation
  bpf: Fix print_reg_state's constant scalar dump
  bpf: Fix incorrect delta propagation between linked registers
  bpf: Properly test iter/task tid filtering
  bpf: Fix iter/task tid filtering
  riscv, bpf: Make BPF_CMPXCHG fully ordered
  bpf, vsock: Drop static vsock_bpf_prot initialization
  vsock: Update msg_count on read_skb()
  vsock: Update rx_bytes on read_skb()
  bpf, sockmap: SK_DROP on attempted redirects of unsupported af_vsock
  selftests/bpf: Add asserts for netfilter link info
  bpf: Fix link info netfilter flags to populate defrag flag
  selftests/bpf: Add test for sign extension in coerce_subreg_to_size_sx()
  selftests/bpf: Add test for truncation after sign extension in coerce_reg_to_size_sx()
  bpf: Fix truncation bug in coerce_reg_to_size_sx()
  selftests/bpf: Assert link info uprobe_multi count & path_size if unset
  bpf: Fix unpopulated path_size when uprobe_multi fields unset
  selftests/bpf: Fix cross-compiling urandom_read
  selftests/bpf: Add test for kfunc module order
  ...
This commit is contained in: commit 3d5ad2d4ec
arch/riscv/net/bpf_jit_comp64.c:

@@ -18,6 +18,7 @@
 #define RV_MAX_REG_ARGS 8
 #define RV_FENTRY_NINSNS 2
 #define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)
+#define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI_CLANG) ? 1 : 0)
 /* imm that allows emit_imm to emit max count insns */
 #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF

@@ -271,7 +272,8 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
 	if (!is_tail_call)
 		emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
 	emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
-		  is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
+		  /* kcfi, fentry and TCC init insns will be skipped on tailcall */
+		  is_tail_call ? (RV_KCFI_NINSNS + RV_FENTRY_NINSNS + 1) * 4 : 0,
 		  ctx);
 }

@@ -548,8 +550,8 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
 		     rv_lr_w(r0, 0, rd, 0, 0), ctx);
 		jmp_offset = ninsns_rvoff(8);
 		emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
-		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
-		     rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) :
+		     rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx);
 		jmp_offset = ninsns_rvoff(-6);
 		emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
 		emit(rv_fence(0x3, 0x3), ctx);
include/net/sock.h:

@@ -2717,6 +2717,11 @@ static inline bool sk_is_stream_unix(const struct sock *sk)
 	return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
 }
 
+static inline bool sk_is_vsock(const struct sock *sk)
+{
+	return sk->sk_family == AF_VSOCK;
+}
+
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
include/uapi/linux/bpf.h:

@@ -6047,11 +6047,6 @@ enum {
 	BPF_F_MARK_ENFORCE = (1ULL << 6),
 };
 
-/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-enum {
-	BPF_F_INGRESS = (1ULL << 0),
-};
-
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
 enum {
 	BPF_F_TUNINFO_IPV6 = (1ULL << 0),

@@ -6198,10 +6193,12 @@ enum {
 	BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
 };
 
-/* Flags for bpf_redirect_map helper */
+/* Flags for bpf_redirect and bpf_redirect_map helpers */
 enum {
-	BPF_F_BROADCAST = (1ULL << 3),
-	BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+	BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
+	BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
+	BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
+#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
 };
 
 #define __bpf_md_ptr(type, name) \
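For orientation, the enum above now holds every flag bit user programs may legally pass to the redirect helpers; everything outside BPF_F_REDIRECT_FLAGS is reserved for internal use. A minimal, hypothetical XDP sketch (the map name and size are invented for illustration) exercising the XDP-path flags:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical devmap holding egress interfaces. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_broadcast(struct xdp_md *ctx)
{
	/* Only BPF_F_BROADCAST/BPF_F_EXCLUDE_INGRESS are meaningful on the
	 * XDP path; BPF_F_INGRESS belongs to the skb path. */
	return bpf_redirect_map(&tx_ports, 0,
				BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
}

char _license[] SEC("license") = "GPL";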
kernel/bpf/bpf_lsm.c:

@@ -339,10 +339,6 @@ BTF_ID(func, bpf_lsm_path_chmod)
 BTF_ID(func, bpf_lsm_path_chown)
 #endif /* CONFIG_SECURITY_PATH */
 
-#ifdef CONFIG_KEYS
-BTF_ID(func, bpf_lsm_key_free)
-#endif /* CONFIG_KEYS */
-
 BTF_ID(func, bpf_lsm_mmap_file)
 BTF_ID(func, bpf_lsm_netlink_send)
 BTF_ID(func, bpf_lsm_path_notify)
kernel/bpf/btf.c:

@@ -3523,7 +3523,7 @@ static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type,
  *   (i + 1) * elem_size
  * where i is the repeat index and elem_size is the size of an element.
  */
-static int btf_repeat_fields(struct btf_field_info *info,
+static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
 			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
 {
 	u32 i, j;

@@ -3543,6 +3543,12 @@ static int btf_repeat_fields(struct btf_field_info *info,
 		}
 	}
 
+	/* The type of struct size or variable size is u32,
+	 * so the multiplication will not overflow.
+	 */
+	if (field_cnt * (repeat_cnt + 1) > info_cnt)
+		return -E2BIG;
+
 	cur = field_cnt;
 	for (i = 0; i < repeat_cnt; i++) {
 		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));

@@ -3587,7 +3593,7 @@ static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t,
 		info[i].off += off;
 
 	if (nelems > 1) {
-		err = btf_repeat_fields(info, ret, nelems - 1, t->size);
+		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
 		if (err == 0)
 			ret *= nelems;
 		else

@@ -3681,10 +3687,10 @@ static int btf_find_field_one(const struct btf *btf,
 
 	if (ret == BTF_FIELD_IGNORE)
 		return 0;
-	if (nelems > info_cnt)
+	if (!info_cnt)
 		return -E2BIG;
 	if (nelems > 1) {
-		ret = btf_repeat_fields(info, 1, nelems - 1, sz);
+		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
 		if (ret < 0)
 			return ret;
 	}

@@ -8961,6 +8967,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
 	if (!type) {
 		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
 			relo_idx, relo->type_id);
+		kfree(specs);
 		return -EINVAL;
 	}
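To make the new bound concrete: when an array of nested structs is expanded, each element contributes field_cnt entries, so btf_repeat_fields() must check field_cnt * (repeat_cnt + 1) against the caller's info_cnt before copying. A hypothetical layout of the kind the UBSAN report was about:

/* Hypothetical example: each element of arr[] carries one kptr field,
 * so BTF parsing must record 1 * (15 + 1) = 16 btf_field_info entries.
 * If 16 exceeds the caller's info_cnt, the fixed code now returns
 * -E2BIG cleanly instead of writing past the end of info[].
 */
struct inner {
	struct bpf_cpumask __kptr *mask;
};

struct outer {
	struct inner arr[16];	/* field_cnt = 1, repeat_cnt = 15 */
};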
kernel/bpf/devmap.c:

@@ -333,9 +333,11 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 
 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 				struct xdp_frame **frames, int n,
-				struct net_device *dev)
+				struct net_device *tx_dev,
+				struct net_device *rx_dev)
 {
-	struct xdp_txq_info txq = { .dev = dev };
+	struct xdp_txq_info txq = { .dev = tx_dev };
+	struct xdp_rxq_info rxq = { .dev = rx_dev };
 	struct xdp_buff xdp;
 	int i, nframes = 0;

@@ -346,6 +348,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 
 		xdp_convert_frame_to_buff(xdpf, &xdp);
 		xdp.txq = &txq;
+		xdp.rxq = &rxq;
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		switch (act) {

@@ -360,7 +363,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
 			fallthrough;
 		case XDP_ABORTED:
-			trace_xdp_exception(dev, xdp_prog, act);
+			trace_xdp_exception(tx_dev, xdp_prog, act);
 			fallthrough;
 		case XDP_DROP:
 			xdp_return_frame_rx_napi(xdpf);

@@ -388,7 +391,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 	}
 
 	if (bq->xdp_prog) {
-		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
+		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
 		if (!to_send)
 			goto out;
 	}
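Before this change, a program attached to a devmap entry saw xdp->rxq == NULL and the kernel crashed on any rx-metadata access. A minimal sketch of such a program (hypothetical, now safe with the populated rxq):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Runs on frames already redirected into a devmap entry; reading
 * ingress_ifindex goes through xdp->rxq, which the fix populates. */
SEC("xdp/devmap")
int xdp_devmap_log_ingress(struct xdp_md *ctx)
{
	__u32 ifindex = ctx->ingress_ifindex;

	bpf_printk("frame from ifindex %u", ifindex);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";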
kernel/bpf/log.c:

@@ -688,8 +688,7 @@ static void print_reg_state(struct bpf_verifier_env *env,
 	if (t == SCALAR_VALUE && reg->precise)
 		verbose(env, "P");
 	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
-		/* reg->off should be 0 for SCALAR_VALUE */
-		verbose_snum(env, reg->var_off.value + reg->off);
+		verbose_snum(env, reg->var_off.value);
 		return;
 	}
kernel/bpf/ringbuf.c:

@@ -29,7 +29,7 @@ struct bpf_ringbuf {
 	u64 mask;
 	struct page **pages;
 	int nr_pages;
-	spinlock_t spinlock ____cacheline_aligned_in_smp;
+	raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
 	/* For user-space producer ring buffers, an atomic_t busy bit is used
 	 * to synchronize access to the ring buffers in the kernel, rather than
 	 * the spinlock that is used for kernel-producer ring buffers. This is

@@ -173,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 	if (!rb)
 		return NULL;
 
-	spin_lock_init(&rb->spinlock);
+	raw_spin_lock_init(&rb->spinlock);
 	atomic_set(&rb->busy, 0);
 	init_waitqueue_head(&rb->waitq);
 	init_irq_work(&rb->work, bpf_ringbuf_notify);

@@ -421,10 +421,10 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	cons_pos = smp_load_acquire(&rb->consumer_pos);
 
 	if (in_nmi()) {
-		if (!spin_trylock_irqsave(&rb->spinlock, flags))
+		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
 			return NULL;
 	} else {
-		spin_lock_irqsave(&rb->spinlock, flags);
+		raw_spin_lock_irqsave(&rb->spinlock, flags);
 	}
 
 	pend_pos = rb->pending_pos;

@@ -450,7 +450,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	 */
 	if (new_prod_pos - cons_pos > rb->mask ||
 	    new_prod_pos - pend_pos > rb->mask) {
-		spin_unlock_irqrestore(&rb->spinlock, flags);
+		raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}

@@ -462,7 +462,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);
 
-	spin_unlock_irqrestore(&rb->spinlock, flags);
+	raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 
 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
 }
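Why a raw spinlock is required here: bpf_ringbuf_reserve() can be reached from non-sleepable contexts (tracing programs, even NMI), and on PREEMPT_RT a plain spinlock_t is a sleeping lock, hence the sleep-in-atomic splat. Any ordinary reserve/submit user takes exactly this path; a hypothetical tracepoint sketch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

SEC("tp/sched/sched_switch")
int log_switch(void *ctx)
{
	/* reserve takes rb->spinlock while this program runs atomically */
	__u64 *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

	if (!e)
		return 0;
	*e = bpf_ktime_get_ns();
	bpf_ringbuf_submit(e, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";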
kernel/bpf/syscall.c:

@@ -3565,15 +3565,16 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
 }
 
 static int bpf_perf_link_fill_common(const struct perf_event *event,
-				     char __user *uname, u32 ulen,
+				     char __user *uname, u32 *ulenp,
 				     u64 *probe_offset, u64 *probe_addr,
 				     u32 *fd_type, unsigned long *missed)
 {
 	const char *buf;
-	u32 prog_id;
+	u32 prog_id, ulen;
 	size_t len;
 	int err;
 
+	ulen = *ulenp;
 	if (!ulen ^ !uname)
 		return -EINVAL;

@@ -3581,10 +3582,17 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
 			      probe_offset, probe_addr, missed);
 	if (err)
 		return err;
-	if (!uname)
-		return 0;
 
 	if (buf) {
 		len = strlen(buf);
+		*ulenp = len + 1;
+	} else {
+		*ulenp = 1;
+	}
+	if (!uname)
+		return 0;
+
+	if (buf) {
 		err = bpf_copy_to_user(uname, buf, ulen, len);
 		if (err)
 			return err;

@@ -3609,7 +3617,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
 
 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
 	ulen = info->perf_event.kprobe.name_len;
-	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
 					&type, &missed);
 	if (err)
 		return err;

@@ -3617,7 +3625,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
 	else
 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
-
+	info->perf_event.kprobe.name_len = ulen;
 	info->perf_event.kprobe.offset = offset;
 	info->perf_event.kprobe.missed = missed;
 	if (!kallsyms_show_value(current_cred()))

@@ -3639,7 +3647,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
 
 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
 	ulen = info->perf_event.uprobe.name_len;
-	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
 					&type, NULL);
 	if (err)
 		return err;

@@ -3648,6 +3656,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
 	else
 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
+	info->perf_event.uprobe.name_len = ulen;
 	info->perf_event.uprobe.offset = offset;
 	info->perf_event.uprobe.cookie = event->bpf_cookie;
 	return 0;

@@ -3673,12 +3682,18 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
 {
 	char __user *uname;
 	u32 ulen;
+	int err;
 
 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
 	ulen = info->perf_event.tracepoint.name_len;
+	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
+	if (err)
+		return err;
+
 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+	info->perf_event.tracepoint.name_len = ulen;
 	info->perf_event.tracepoint.cookie = event->bpf_cookie;
-	return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
+	return 0;
 }
 
 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
kernel/bpf/task_iter.c:

@@ -99,7 +99,7 @@ static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
 	rcu_read_lock();
 	pid = find_pid_ns(common->pid, common->ns);
 	if (pid) {
-		task = get_pid_task(pid, PIDTYPE_TGID);
+		task = get_pid_task(pid, PIDTYPE_PID);
 		*tid = common->pid;
 	}
 	rcu_read_unlock();
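From userspace, the tid being fixed here is the one supplied through bpf_iter_link_info. A sketch (assuming prog is an already-loaded SEC("iter/task") program) of attaching a tid-filtered task iterator:

#include <linux/bpf.h>
#include <sys/types.h>
#include <bpf/libbpf.h>

/* Sketch only: 'prog' is a loaded iter/task program. */
static int attach_tid_filtered(struct bpf_program *prog, pid_t tid)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;

	/* with the fix, this matches the thread itself,
	 * not the whole thread group leader */
	linfo.task.tid = tid;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	return link ? bpf_link__fd(link) : -1;
}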
kernel/bpf/verifier.c:

@@ -2750,10 +2750,16 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
 		b->module = mod;
 		b->offset = offset;
 
+		/* sort() reorders entries by value, so b may no longer point
+		 * to the right entry after this
+		 */
 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
 		     kfunc_btf_cmp_by_off, NULL);
+	} else {
+		btf = b->btf;
 	}
-	return b->btf;
+
+	return btf;
 }
 
 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)

@@ -6333,10 +6339,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
 
 	/* both of s64_max/s64_min positive or negative */
 	if ((s64_max >= 0) == (s64_min >= 0)) {
-		reg->smin_value = reg->s32_min_value = s64_min;
-		reg->smax_value = reg->s32_max_value = s64_max;
-		reg->umin_value = reg->u32_min_value = s64_min;
-		reg->umax_value = reg->u32_max_value = s64_max;
+		reg->s32_min_value = reg->smin_value = s64_min;
+		reg->s32_max_value = reg->smax_value = s64_max;
+		reg->u32_min_value = reg->umin_value = s64_min;
+		reg->u32_max_value = reg->umax_value = s64_max;
 		reg->var_off = tnum_range(s64_min, s64_max);
 		return;
 	}

@@ -14264,12 +14270,13 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	 *   r1 += 0x1
 	 *   if r2 < 1000 goto ...
 	 *   use r1 in memory access
-	 * So remember constant delta between r2 and r1 and update r1 after
-	 * 'if' condition.
+	 * So for 64-bit alu remember constant delta between r2 and r1 and
+	 * update r1 after 'if' condition.
 	 */
-	if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD &&
-	    dst_reg->id && is_reg_const(src_reg, alu32)) {
-		u64 val = reg_const_value(src_reg, alu32);
+	if (env->bpf_capable &&
+	    BPF_OP(insn->code) == BPF_ADD && !alu32 &&
+	    dst_reg->id && is_reg_const(src_reg, false)) {
+		u64 val = reg_const_value(src_reg, false);
 
 		if ((dst_reg->id & BPF_ADD_CONST) ||
 		    /* prevent overflow in sync_linked_regs() later */

@@ -15326,8 +15333,12 @@ static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg)
 			continue;
 		if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
 		    reg->off == known_reg->off) {
+			s32 saved_subreg_def = reg->subreg_def;
+
 			copy_register_state(reg, known_reg);
+			reg->subreg_def = saved_subreg_def;
 		} else {
+			s32 saved_subreg_def = reg->subreg_def;
 			s32 saved_off = reg->off;
 
 			fake_reg.type = SCALAR_VALUE;

@@ -15340,6 +15351,7 @@ static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg)
 			 * otherwise another sync_linked_regs() will be incorrect.
 			 */
 			reg->off = saved_off;
+			reg->subreg_def = saved_subreg_def;
 
 			scalar32_min_max_add(reg, &fake_reg);
 			scalar_min_max_add(reg, &fake_reg);

@@ -22310,7 +22322,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
 	 * allocate/free it every time bpf_check() is called
 	 */
-	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+	env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
 	if (!env)
 		return -ENOMEM;

@@ -22546,6 +22558,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 	mutex_unlock(&bpf_verifier_lock);
 	vfree(env->insn_aux_data);
 err_free_env:
-	kfree(env);
+	kvfree(env);
 	return ret;
 }
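A comment-only C sketch of the instruction pattern the delta-propagation logic tracks, and why the fix restricts it to 64-bit ALU:

/* Illustration of the linked-register pattern from the verifier
 * comment above ('wN' denotes a 32-bit subregister):
 *
 *   r1 = r2;             // r1 and r2 become linked
 *   r1 += 0x1;           // 64-bit add: a delta of +1 is safe to record
 *   if r2 < 1000 goto l;  // range learned on r2 ...
 *                        // ... is propagated to r1 via the delta
 *
 * With a 32-bit add instead:
 *
 *   w1 = w2;
 *   w1 += 0x1;           // wraps at 2^32, so a recorded 64-bit delta
 *                        // would be wrong; after the fix no link is
 *                        // recorded for alu32 at all
 */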
kernel/trace/bpf_trace.c:

@@ -3133,7 +3133,8 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
 	struct bpf_uprobe_multi_link *umulti_link;
 	u32 ucount = info->uprobe_multi.count;
 	int err = 0, i;
-	long left;
+	char *p, *buf;
+	long left = 0;
 
 	if (!upath ^ !upath_size)
 		return -EINVAL;

@@ -3147,26 +3148,23 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
 	info->uprobe_multi.pid = umulti_link->task ?
 				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
 
-	if (upath) {
-		char *p, *buf;
-
-		upath_size = min_t(u32, upath_size, PATH_MAX);
-
-		buf = kmalloc(upath_size, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-		p = d_path(&umulti_link->path, buf, upath_size);
-		if (IS_ERR(p)) {
-			kfree(buf);
-			return PTR_ERR(p);
-		}
-		upath_size = buf + upath_size - p;
-		left = copy_to_user(upath, p, upath_size);
-		kfree(buf);
-		if (left)
-			return -EFAULT;
-		info->uprobe_multi.path_size = upath_size;
-	}
+	upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX;
+	buf = kmalloc(upath_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = d_path(&umulti_link->path, buf, upath_size);
+	if (IS_ERR(p)) {
+		kfree(buf);
+		return PTR_ERR(p);
+	}
+	upath_size = buf + upath_size - p;
+
+	if (upath)
+		left = copy_to_user(upath, p, upath_size);
+	kfree(buf);
+	if (left)
+		return -EFAULT;
+	info->uprobe_multi.path_size = upath_size;
 
 	if (!uoffsets && !ucookies && !uref_ctr_offsets)
 		return 0;
lib/buildid.c:

@@ -5,6 +5,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/pagemap.h>
+#include <linux/secretmem.h>
 
 #define BUILD_ID 3

@@ -64,6 +65,10 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
 
 	freader_put_folio(r);
 
+	/* reject secretmem folios created with memfd_secret() */
+	if (secretmem_mapping(r->file->f_mapping))
+		return -EFAULT;
+
 	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
 
 	/* if sleeping is allowed, wait for the page, if necessary */
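For background, secretmem pages are removed from the kernel's direct map, so the build-ID parser must not touch them. A hypothetical userspace sketch of how such a mapping comes about (memfd_secret has no libc wrapper, hence the raw syscall; SYS_memfd_secret requires reasonably recent kernel headers):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int make_secret_mapping(size_t len)
{
	int fd = syscall(SYS_memfd_secret, 0);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, len) < 0)
		return -1;
	/* pages backing this mapping are unmapped from the kernel direct
	 * map; build_id_parse() on a VMA like this now fails with -EFAULT
	 * instead of crashing */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return p == MAP_FAILED ? -1 : fd;
}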
net/core/filter.c:

@@ -2438,9 +2438,9 @@ static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
 
 /* Internal, non-exposed redirect flags. */
 enum {
-	BPF_F_NEIGH = (1ULL << 1),
-	BPF_F_PEER = (1ULL << 2),
-	BPF_F_NEXTHOP = (1ULL << 3),
+	BPF_F_NEIGH = (1ULL << 16),
+	BPF_F_PEER = (1ULL << 17),
+	BPF_F_NEXTHOP = (1ULL << 18),
 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
 };

@@ -2450,6 +2450,8 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
 	struct sk_buff *clone;
 	int ret;
 
+	BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS);
+
 	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
 		return -EINVAL;
net/core/sock_map.c:

@@ -647,6 +647,8 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
 	sk = __sock_map_lookup_elem(map, key);
 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 		return SK_DROP;
+	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+		return SK_DROP;
 
 	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 	return SK_PASS;

@@ -675,6 +677,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 		return SK_DROP;
 	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 		return SK_DROP;
+	if (sk_is_vsock(sk))
+		return SK_DROP;
 
 	msg->flags = flags;
 	msg->sk_redir = sk;

@@ -1249,6 +1253,8 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
 	sk = __sock_hash_lookup_elem(map, key);
 	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 		return SK_DROP;
+	if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
+		return SK_DROP;
 
 	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
 	return SK_PASS;

@@ -1277,6 +1283,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
 		return SK_DROP;
 	if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
 		return SK_DROP;
+	if (sk_is_vsock(sk))
+		return SK_DROP;
 
 	msg->flags = flags;
 	msg->sk_redir = sk;
net/netfilter/nf_bpf_link.c:

@@ -150,11 +150,12 @@ static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
 				      struct bpf_link_info *info)
 {
 	struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
+	const struct nf_defrag_hook *hook = nf_link->defrag_hook;
 
 	info->netfilter.pf = nf_link->hook_ops.pf;
 	info->netfilter.hooknum = nf_link->hook_ops.hooknum;
 	info->netfilter.priority = nf_link->hook_ops.priority;
-	info->netfilter.flags = 0;
+	info->netfilter.flags = hook ? BPF_F_NETFILTER_IP_DEFRAG : 0;
 
 	return 0;
 }
net/vmw_vsock/virtio_transport_common.c:

@@ -1707,6 +1707,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
 {
 	struct virtio_vsock_sock *vvs = vsk->trans;
 	struct sock *sk = sk_vsock(vsk);
+	struct virtio_vsock_hdr *hdr;
 	struct sk_buff *skb;
 	int off = 0;
 	int err;

@@ -1716,10 +1717,19 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
 	 * works for types other than dgrams.
 	 */
 	skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
+	if (!skb) {
+		spin_unlock_bh(&vvs->rx_lock);
+		return err;
+	}
+
+	hdr = virtio_vsock_hdr(skb);
+	if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
+		vvs->msg_count--;
+
+	virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
 	spin_unlock_bh(&vvs->rx_lock);
 
-	if (!skb)
-		return err;
+	virtio_transport_send_credit_update(vsk);
 
 	return recv_actor(sk, skb);
 }
net/vmw_vsock/vsock_bpf.c:

@@ -114,14 +114,6 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
 	return copied;
 }
 
-/* Copy of original proto with updated sock_map methods */
-static struct proto vsock_bpf_prot = {
-	.close = sock_map_close,
-	.recvmsg = vsock_bpf_recvmsg,
-	.sock_is_readable = sk_msg_is_readable,
-	.unhash = sock_map_unhash,
-};
-
 static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
 {
 	*prot = *base;
tools/include/uapi/linux/bpf.h:

@@ -5519,11 +5519,12 @@ union bpf_attr {
 *		**-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
 *		invalid arguments are passed.
 *
-* void *bpf_kptr_xchg(void *map_value, void *ptr)
+* void *bpf_kptr_xchg(void *dst, void *ptr)
 *	Description
-*		Exchange kptr at pointer *map_value* with *ptr*, and return the
-*		old value. *ptr* can be NULL, otherwise it must be a referenced
-*		pointer which will be released when this helper is called.
+*		Exchange kptr at pointer *dst* with *ptr*, and return the old value.
+*		*dst* can be map value or local kptr. *ptr* can be NULL, otherwise
+*		it must be a referenced pointer which will be released when this helper
+*		is called.
 *	Return
 *		The old value of kptr (which can be NULL). The returned pointer
 *		if not NULL, is a reference which must be released using its

@@ -6046,11 +6047,6 @@ enum {
 	BPF_F_MARK_ENFORCE = (1ULL << 6),
 };
 
-/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-enum {
-	BPF_F_INGRESS = (1ULL << 0),
-};
-
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
 enum {
 	BPF_F_TUNINFO_IPV6 = (1ULL << 0),

@@ -6197,10 +6193,12 @@ enum {
 	BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
 };
 
-/* Flags for bpf_redirect_map helper */
+/* Flags for bpf_redirect and bpf_redirect_map helpers */
 enum {
-	BPF_F_BROADCAST = (1ULL << 3),
-	BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+	BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
+	BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
+	BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
+#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
 };
 
 #define __bpf_md_ptr(type, name) \
tools/testing/selftests/bpf/Makefile:

@@ -157,7 +157,8 @@ TEST_GEN_PROGS_EXTENDED = \
 	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
 	test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
 	xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
-	xdp_features bpf_test_no_cfi.ko
+	xdp_features bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
+	bpf_test_modorder_y.ko
 
 TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi

@@ -263,7 +264,7 @@ $(OUTPUT)/%:%.c
 ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv))
 LLD := lld
 else
-LLD := ld
+LLD := $(shell command -v $(LD))
 endif
 
 # Filter out -static for liburandom_read.so and its dependent targets so that static builds

@@ -303,6 +304,19 @@ $(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
 	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi
 	$(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
 
+$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch])
+	$(call msg,MOD,,$@)
+	$(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation
+	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_x
+	$(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@
+
+$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch])
+	$(call msg,MOD,,$@)
+	$(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation
+	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_y
+	$(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@
+
 
 DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
 ifneq ($(CROSS_COMPILE),)
 CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool

@@ -722,6 +736,8 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
 			 ip_check_defrag_frags.h
 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
 		       $(OUTPUT)/bpf_test_no_cfi.ko \
+		       $(OUTPUT)/bpf_test_modorder_x.ko \
+		       $(OUTPUT)/bpf_test_modorder_y.ko \
 		       $(OUTPUT)/liburandom_read.so \
 		       $(OUTPUT)/xdp_synproxy \
 		       $(OUTPUT)/sign-file \

@@ -856,6 +872,8 @@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
 	$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
 		       no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
 		       bpf_test_no_cfi.ko \
+		       bpf_test_modorder_x.ko \
+		       bpf_test_modorder_y.ko \
 		       liburandom_read.so) \
 	$(OUTPUT)/FEATURE-DUMP.selftests
tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile (new file, 19 lines):

BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)

ifeq ($(V),1)
Q =
else
Q = @
endif

MODULES = bpf_test_modorder_x.ko

obj-m += bpf_test_modorder_x.o

all:
	+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules

clean:
	+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c (new file, 39 lines):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/module.h>
#include <linux/init.h>

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_test_modorder_retx(void)
{
	return 'x';
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(bpf_test_modorder_kfunc_x_ids)
BTF_ID_FLAGS(func, bpf_test_modorder_retx);
BTF_KFUNCS_END(bpf_test_modorder_kfunc_x_ids)

static const struct btf_kfunc_id_set bpf_test_modorder_x_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modorder_kfunc_x_ids,
};

static int __init bpf_test_modorder_x_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
					 &bpf_test_modorder_x_set);
}

static void __exit bpf_test_modorder_x_exit(void)
{
}

module_init(bpf_test_modorder_x_init);
module_exit(bpf_test_modorder_x_exit);

MODULE_DESCRIPTION("BPF selftest ordertest module X");
MODULE_LICENSE("GPL");
tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile (new file, 19 lines):

BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)

ifeq ($(V),1)
Q =
else
Q = @
endif

MODULES = bpf_test_modorder_y.ko

obj-m += bpf_test_modorder_y.o

all:
	+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules

clean:
	+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean
tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c (new file, 39 lines):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/module.h>
#include <linux/init.h>

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_test_modorder_rety(void)
{
	return 'y';
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(bpf_test_modorder_kfunc_y_ids)
BTF_ID_FLAGS(func, bpf_test_modorder_rety);
BTF_KFUNCS_END(bpf_test_modorder_kfunc_y_ids)

static const struct btf_kfunc_id_set bpf_test_modorder_y_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modorder_kfunc_y_ids,
};

static int __init bpf_test_modorder_y_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
					 &bpf_test_modorder_y_set);
}

static void __exit bpf_test_modorder_y_exit(void)
{
}

module_init(bpf_test_modorder_y_init);
module_exit(bpf_test_modorder_y_exit);

MODULE_DESCRIPTION("BPF selftest ordertest module Y");
MODULE_LICENSE("GPL");
tools/testing/selftests/bpf/prog_tests/bpf_iter.c:

@@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
 	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
 		  "pthread_create");
 
-	skel->bss->tid = getpid();
+	skel->bss->tid = gettid();
 
 	do_dummy_read_opts(skel->progs.dump_task, opts);

@@ -249,25 +249,42 @@ static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown,
 	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
 }
 
-static void test_task_tid(void)
+static void *run_test_task_tid(void *arg)
 {
 	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 	union bpf_iter_link_info linfo;
 	int num_unknown_tid, num_known_tid;
 
+	ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id");
+
 	memset(&linfo, 0, sizeof(linfo));
-	linfo.task.tid = getpid();
+	linfo.task.tid = gettid();
 	opts.link_info = &linfo;
 	opts.link_info_len = sizeof(linfo);
 	test_task_common(&opts, 0, 1);
 
 	linfo.task.tid = 0;
 	linfo.task.pid = getpid();
-	test_task_common(&opts, 1, 1);
+	/* This includes the parent thread, this thread,
+	 * and the do_nothing_wait thread
+	 */
+	test_task_common(&opts, 2, 1);
 
 	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
-	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
+	ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
 	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
+
+	return NULL;
+}
+
+static void test_task_tid(void)
+{
+	pthread_t thread_id;
+
+	/* Create a new thread so pid and tid aren't the same */
+	ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
+		  "pthread_create");
+	ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
 }
 
 static void test_task_pid(void)
tools/testing/selftests/bpf/prog_tests (file header lost in extraction; musl libc compile fix):

@@ -35,7 +35,7 @@ static int send_datagram(void)
 	if (!ASSERT_OK_FD(sock, "create socket"))
 		return sock;
 
-	if (!ASSERT_OK(connect(sock, &addr, sizeof(addr)), "connect")) {
+	if (!ASSERT_OK(connect(sock, (struct sockaddr *)&addr, sizeof(addr)), "connect")) {
 		close(sock);
 		return -1;
 	}
tools/testing/selftests/bpf/prog_tests/cpumask.c:

@@ -23,6 +23,7 @@ static const char * const cpumask_success_testcases[] = {
 	"test_global_mask_array_l2_rcu",
 	"test_global_mask_nested_rcu",
 	"test_global_mask_nested_deep_rcu",
+	"test_global_mask_nested_deep_array_rcu",
 	"test_cpumask_weight",
 };
tools/testing/selftests/bpf/prog_tests/fill_link_info.c:

@@ -67,8 +67,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
 
 		ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
 
+		ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
+			  "name_len");
 		if (!info.perf_event.kprobe.func_name) {
-			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
 			info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
 			info.perf_event.kprobe.name_len = sizeof(buf);
 			goto again;

@@ -79,8 +80,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
 		ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
 		break;
 	case BPF_PERF_EVENT_TRACEPOINT:
+		ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
+			  "name_len");
 		if (!info.perf_event.tracepoint.tp_name) {
-			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
 			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
 			info.perf_event.tracepoint.name_len = sizeof(buf);
 			goto again;

@@ -96,8 +98,9 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
 	case BPF_PERF_EVENT_URETPROBE:
 		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
 
+		ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
+			  "name_len");
 		if (!info.perf_event.uprobe.file_name) {
-			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
 			info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
 			info.perf_event.uprobe.name_len = sizeof(buf);
 			goto again;

@@ -417,6 +420,15 @@ verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
 	if (!ASSERT_NEQ(err, -1, "readlink"))
 		return -1;
 
+	memset(&info, 0, sizeof(info));
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+		return -1;
+
+	ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
+	ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1,
+		  "info.uprobe_multi.path_size");
+
 	for (bit = 0; bit < 8; bit++) {
 		memset(&info, 0, sizeof(info));
 		info.uprobe_multi.path = ptr_to_u64(path_buf);
tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c (new file, 55 lines):

// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <testing_helpers.h>

#include "kfunc_module_order.skel.h"

static int test_run_prog(const struct bpf_program *prog,
			 struct bpf_test_run_opts *opts)
{
	int err;

	err = bpf_prog_test_run_opts(bpf_program__fd(prog), opts);
	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
		return err;

	if (!ASSERT_EQ((int)opts->retval, 0, bpf_program__name(prog)))
		return -EINVAL;

	return 0;
}

void test_kfunc_module_order(void)
{
	struct kfunc_module_order *skel;
	char pkt_data[64] = {};
	int err = 0;

	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, test_opts, .data_in = pkt_data,
			    .data_size_in = sizeof(pkt_data));

	err = load_module("bpf_test_modorder_x.ko",
			  env_verbosity > VERBOSE_NONE);
	if (!ASSERT_OK(err, "load bpf_test_modorder_x.ko"))
		return;

	err = load_module("bpf_test_modorder_y.ko",
			  env_verbosity > VERBOSE_NONE);
	if (!ASSERT_OK(err, "load bpf_test_modorder_y.ko"))
		goto exit_modx;

	skel = kfunc_module_order__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kfunc_module_order__open_and_load()")) {
		err = -EINVAL;
		goto exit_mods;
	}

	test_run_prog(skel->progs.call_kfunc_xy, &test_opts);
	test_run_prog(skel->progs.call_kfunc_yx, &test_opts);

	kfunc_module_order__destroy(skel);
exit_mods:
	unload_module("bpf_test_modorder_y", env_verbosity > VERBOSE_NONE);
exit_modx:
	unload_module("bpf_test_modorder_x", env_verbosity > VERBOSE_NONE);
}
tools/testing/selftests/bpf/prog_tests/netfilter_link_attach.c:

@@ -26,10 +26,43 @@ static const struct nf_link_test nf_hook_link_tests[] = {
 
 	{ .pf = NFPROTO_INET, .priority = 1, .name = "invalid-inet-not-supported", },
 
-	{ .pf = NFPROTO_IPV4, .priority = -10000, .expect_success = true, .name = "attach ipv4", },
-	{ .pf = NFPROTO_IPV6, .priority = 10001, .expect_success = true, .name = "attach ipv6", },
+	{
+		.pf = NFPROTO_IPV4,
+		.hooknum = NF_INET_POST_ROUTING,
+		.priority = -10000,
+		.flags = 0,
+		.expect_success = true,
+		.name = "attach ipv4",
+	},
+	{
+		.pf = NFPROTO_IPV6,
+		.hooknum = NF_INET_FORWARD,
+		.priority = 10001,
+		.flags = BPF_F_NETFILTER_IP_DEFRAG,
+		.expect_success = true,
+		.name = "attach ipv6",
+	},
 };
 
+static void verify_netfilter_link_info(struct bpf_link *link, const struct nf_link_test nf_expected)
+{
+	struct bpf_link_info info;
+	__u32 len = sizeof(info);
+	int err, fd;
+
+	memset(&info, 0, len);
+
+	fd = bpf_link__fd(link);
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_OK(err, "get_link_info");
+
+	ASSERT_EQ(info.type, BPF_LINK_TYPE_NETFILTER, "info link type");
+	ASSERT_EQ(info.netfilter.pf, nf_expected.pf, "info nf protocol family");
+	ASSERT_EQ(info.netfilter.hooknum, nf_expected.hooknum, "info nf hooknum");
+	ASSERT_EQ(info.netfilter.priority, nf_expected.priority, "info nf priority");
+	ASSERT_EQ(info.netfilter.flags, nf_expected.flags, "info nf flags");
+}
+
 void test_netfilter_link_attach(void)
 {
 	struct test_netfilter_link_attach *skel;

@@ -64,6 +97,8 @@ void test_netfilter_link_attach(void)
 		if (!ASSERT_OK_PTR(link, "program attach successful"))
 			continue;
 
+		verify_netfilter_link_info(link, nf_hook_link_tests[i]);
+
 		link2 = bpf_program__attach_netfilter(prog, &opts);
 		ASSERT_ERR_PTR(link2, "attach program with same pf/hook/priority");

@@ -73,6 +108,9 @@ void test_netfilter_link_attach(void)
 		link2 = bpf_program__attach_netfilter(prog, &opts);
 		if (!ASSERT_OK_PTR(link2, "program reattach successful"))
 			continue;
+
+		verify_netfilter_link_info(link2, nf_hook_link_tests[i]);
+
 		if (!ASSERT_OK(bpf_link__destroy(link2), "link destroy"))
 			break;
 	} else {
tools/testing/selftests/bpf/prog_tests/verifier.c:

@@ -44,6 +44,7 @@
 #include "verifier_ld_ind.skel.h"
 #include "verifier_ldsx.skel.h"
 #include "verifier_leak_ptr.skel.h"
+#include "verifier_linked_scalars.skel.h"
 #include "verifier_loops1.skel.h"
 #include "verifier_lwt.skel.h"
 #include "verifier_map_in_map.skel.h"

@@ -170,6 +171,7 @@ void test_verifier_jit_convergence(void) { RUN(verifier_jit_convergence); }
 void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); }
 void test_verifier_ldsx(void) { RUN(verifier_ldsx); }
 void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); }
+void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); }
 void test_verifier_loops1(void) { RUN(verifier_loops1); }
 void test_verifier_lwt(void) { RUN(verifier_lwt); }
 void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c:

@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <arpa/inet.h>
 #include <uapi/linux/bpf.h>
 #include <linux/if_link.h>
+#include <network_helpers.h>
 #include <net/if.h>
 #include <test_progs.h>
 
 #include "test_xdp_devmap_helpers.skel.h"

@@ -8,30 +11,35 @@
 #include "test_xdp_with_devmap_helpers.skel.h"
 
 #define IFINDEX_LO 1
+#define TEST_NS "devmap_attach_ns"
 
 static void test_xdp_with_devmap_helpers(void)
 {
-	struct test_xdp_with_devmap_helpers *skel;
+	struct test_xdp_with_devmap_helpers *skel = NULL;
 	struct bpf_prog_info info = {};
 	struct bpf_devmap_val val = {
 		.ifindex = IFINDEX_LO,
 	};
 	__u32 len = sizeof(info);
-	int err, dm_fd, map_fd;
+	int err, dm_fd, dm_fd_redir, map_fd;
+	struct nstoken *nstoken = NULL;
+	char data[10] = {};
 	__u32 idx = 0;
 
+	SYS(out_close, "ip netns add %s", TEST_NS);
+	nstoken = open_netns(TEST_NS);
+	if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+		goto out_close;
+	SYS(out_close, "ip link set dev lo up");
+
 	skel = test_xdp_with_devmap_helpers__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
-		return;
+		goto out_close;
 
-	dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
-	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
+	dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+	err = bpf_xdp_attach(IFINDEX_LO, dm_fd_redir, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))
 		goto out_close;
 
-	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
-	ASSERT_OK(err, "XDP program detach");
-
 	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
 	map_fd = bpf_map__fd(skel->maps.dm_ports);

@@ -47,6 +55,22 @@ static void test_xdp_with_devmap_helpers(void)
 	ASSERT_OK(err, "Read devmap entry");
 	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
 
+	/* send a packet to trigger any potential bugs in there */
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = 10,
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = 1,
+		);
+	err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+	ASSERT_OK(err, "XDP test run");
+
+	/* wait for the packets to be flushed */
+	kern_sync_rcu();
+
+	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
+	ASSERT_OK(err, "XDP program detach");
+
 	/* can not attach BPF_XDP_DEVMAP program to a device */
 	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program"))

@@ -67,6 +91,8 @@ static void test_xdp_with_devmap_helpers(void)
 	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
 
 out_close:
+	close_netns(nstoken);
+	SYS_NOFAIL("ip netns del %s", TEST_NS);
 	test_xdp_with_devmap_helpers__destroy(skel);
 }

@@ -124,6 +150,86 @@ static void test_xdp_with_devmap_frags_helpers(void)
 	test_xdp_with_devmap_frags_helpers__destroy(skel);
 }
 
+static void test_xdp_with_devmap_helpers_veth(void)
+{
+	struct test_xdp_with_devmap_helpers *skel = NULL;
+	struct bpf_prog_info info = {};
+	struct bpf_devmap_val val = {};
+	struct nstoken *nstoken = NULL;
+	__u32 len = sizeof(info);
+	int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst;
+	char data[10] = {};
+	__u32 idx = 0;
+
+	SYS(out_close, "ip netns add %s", TEST_NS);
+	nstoken = open_netns(TEST_NS);
+	if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+		goto out_close;
+
+	SYS(out_close, "ip link add veth_src type veth peer name veth_dst");
+	SYS(out_close, "ip link set dev veth_src up");
+	SYS(out_close, "ip link set dev veth_dst up");
+
+	val.ifindex = if_nametoindex("veth_src");
+	ifindex_dst = if_nametoindex("veth_dst");
+	if (!ASSERT_NEQ(val.ifindex, 0, "val.ifindex") ||
+	    !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+		goto out_close;
+
+	skel = test_xdp_with_devmap_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+		goto out_close;
+
+	dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog);
+	err = bpf_xdp_attach(val.ifindex, dm_fd_redir, XDP_FLAGS_DRV_MODE, NULL);
+	if (!ASSERT_OK(err, "Attach of program with 8-byte devmap"))
+		goto out_close;
+
+	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+	map_fd = bpf_map__fd(skel->maps.dm_ports);
+	err = bpf_prog_get_info_by_fd(dm_fd, &info, &len);
+	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
+		goto out_close;
+
+	val.bpf_prog.fd = dm_fd;
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_OK(err, "Add program to devmap entry");
+
+	err = bpf_map_lookup_elem(map_fd, &idx, &val);
+	ASSERT_OK(err, "Read devmap entry");
+	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
+
+	/* attach dummy to other side to enable reception */
+	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
+	err = bpf_xdp_attach(ifindex_dst, dm_fd, XDP_FLAGS_DRV_MODE, NULL);
+	if (!ASSERT_OK(err, "Attach of dummy XDP"))
+		goto out_close;
+
+	/* send a packet to trigger any potential bugs in there */
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = 10,
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = 1,
+		);
+	err = bpf_prog_test_run_opts(dm_fd_redir, &opts);
+	ASSERT_OK(err, "XDP test run");
+
+	/* wait for the packets to be flushed */
+	kern_sync_rcu();
+
+	err = bpf_xdp_detach(val.ifindex, XDP_FLAGS_DRV_MODE, NULL);
+	ASSERT_OK(err, "XDP program detach");
+
+	err = bpf_xdp_detach(ifindex_dst, XDP_FLAGS_DRV_MODE, NULL);
+	ASSERT_OK(err, "XDP program detach");
+
+out_close:
+	close_netns(nstoken);
+	SYS_NOFAIL("ip netns del %s", TEST_NS);
+	test_xdp_with_devmap_helpers__destroy(skel);
+}
+
 void serial_test_xdp_devmap_attach(void)
 {
 	if (test__start_subtest("DEVMAP with programs in entries"))

@@ -134,4 +240,7 @@ void serial_test_xdp_devmap_attach(void)
 
 	if (test__start_subtest("Verifier check of DEVMAP programs"))
 		test_neg_xdp_devmap_helpers();
+
+	if (test__start_subtest("DEVMAP with programs in entries on veth"))
+		test_xdp_with_devmap_helpers_veth();
 }
tools/testing/selftests/bpf/progs/cpumask_common.h:

@@ -7,6 +7,11 @@
 #include "errno.h"
 #include <stdbool.h>
 
+/* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h,
+ * so use the hard-coded number as a workaround.
+ */
+#define CPUMASK_KPTR_FIELDS_MAX 11
+
 int err;
 
 #define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
tools/testing/selftests/bpf/progs/cpumask_failure.c:

@@ -10,6 +10,21 @@
 
 char _license[] SEC("license") = "GPL";
 
+struct kptr_nested_array_2 {
+	struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_array_1 {
+	/* Make btf_parse_fields() in map_create() return -E2BIG */
+	struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1];
+};
+
+struct kptr_nested_array {
+	struct kptr_nested_array_1 d_1;
+};
+
+private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr;
+
 /* Prototype for all of the program trace events below:
  *
  * TRACE_EVENT(task_newtask,

@@ -187,3 +202,23 @@ int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
 
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+__failure __msg("has no valid kptr")
+int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *local, *prev;
+
+	local = create_cpumask();
+	if (!local)
+		return 0;
+
+	prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local);
+	if (prev) {
+		bpf_cpumask_release(prev);
+		err = 3;
+		return 0;
+	}
+
+	return 0;
+}
tools/testing/selftests/bpf/progs/cpumask_success.c:

@@ -31,11 +31,59 @@ struct kptr_nested_deep {
 	struct kptr_nested_pair ptr_pairs[3];
 };
 
+struct kptr_nested_deep_array_1_2 {
+	int dummy;
+	struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_1_1 {
+	int dummy;
+	struct kptr_nested_deep_array_1_2 d_2;
+};
+
+struct kptr_nested_deep_array_1 {
+	long dummy;
+	struct kptr_nested_deep_array_1_1 d_1;
+};
+
+struct kptr_nested_deep_array_2_2 {
+	long dummy[2];
+	struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_2_1 {
+	int dummy;
+	struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX];
+};
+
+struct kptr_nested_deep_array_2 {
+	long dummy;
+	struct kptr_nested_deep_array_2_1 d_1;
+};
+
+struct kptr_nested_deep_array_3_2 {
+	long dummy[2];
+	struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_deep_array_3_1 {
+	int dummy;
+	struct kptr_nested_deep_array_3_2 d_2;
+};
+
+struct kptr_nested_deep_array_3 {
+	long dummy;
+	struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX];
+};
+
 private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
 private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
 private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
 private(MASK) static struct kptr_nested global_mask_nested[2];
 private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep;
+private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1;
+private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2;
+private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3;
 
 static bool is_test_task(void)
 {

@@ -543,12 +591,21 @@ static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
 		goto err_exit;
 	}
 
-	/* [<mask 0>, NULL] */
-	if (!*mask0 || *mask1) {
+	/* [<mask 0>, *] */
+	if (!*mask0) {
 		err = 2;
 		goto err_exit;
 	}
 
+	if (!mask1)
+		goto err_exit;
+
+	/* [*, NULL] */
+	if (*mask1) {
+		err = 3;
+		goto err_exit;
+	}
+
 	local = create_cpumask();
 	if (!local) {
 		err = 9;

@@ -631,6 +688,23 @@ int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
 	return 0;
 }
 
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+	int i;
+
+	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+		_global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL);
+
+	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+		_global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL);
+
+	for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++)
+		_global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL);
+
+	return 0;
+}
+
 SEC("tp_btf/task_newtask")
 int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
 {
30	tools/testing/selftests/bpf/progs/kfunc_module_order.c	Normal file
@@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_test_modorder_retx(void) __ksym;
extern int bpf_test_modorder_rety(void) __ksym;

SEC("classifier")
int call_kfunc_xy(struct __sk_buff *skb)
{
	int ret1, ret2;

	ret1 = bpf_test_modorder_retx();
	ret2 = bpf_test_modorder_rety();

	return ret1 == 'x' && ret2 == 'y' ? 0 : -1;
}

SEC("classifier")
int call_kfunc_yx(struct __sk_buff *skb)
{
	int ret1, ret2;

	ret1 = bpf_test_modorder_rety();
	ret2 = bpf_test_modorder_retx();

	return ret1 == 'y' && ret2 == 'x' ? 0 : -1;
}

char _license[] SEC("license") = "GPL";
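A plausible userspace driver for the two classifiers above (illustrative; the actual prog_tests/ harness and module names may differ). It assumes the libbpf-generated skeleton header and that the modules providing bpf_test_modorder_retx()/rety() are already loaded:

#include <bpf/libbpf.h>
#include "kfunc_module_order.skel.h" /* assumed generated skeleton name */

static int run_once(struct bpf_program *prog)
{
	char data[64] = {}; /* dummy packet; SCHED_CLS test runs need >= 14 bytes */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);

	/* retval is 0 only if both kfuncs resolved to the right module */
	return err ?: (int)topts.retval;
}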
@@ -12,7 +12,7 @@ struct {
SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
-	return bpf_redirect_map(&dm_ports, 1, 0);
+	return bpf_redirect_map(&dm_ports, 0, 0);
}

/* invalid program on DEVMAP entry;
34	tools/testing/selftests/bpf/progs/verifier_linked_scalars.c	Normal file
@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("scalars: find linked scalars")
__failure
__msg("math between fp pointer and 2147483647 is not allowed")
__naked void scalars(void)
{
	asm volatile ("		\
	r0 = 0;			\
	r1 = 0x80000001 ll;	\
	r1 /= 1;		\
	r2 = r1;		\
	r4 = r1;		\
	w2 += 0x7FFFFFFF;	\
	w4 += 0;		\
	if r2 == 0 goto l1;	\
	exit;			\
l1:				\
	r4 >>= 63;		\
	r3 = 1;			\
	r3 -= r4;		\
	r3 *= 0x7FFFFFFF;	\
	r3 += r10;		\
	*(u8*)(r3 - 1) = r0;	\
	exit;			\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";
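For reference, the arithmetic the test hinges on: r1 is pinned to 0x80000001, and the 32-bit addition wraps, since (0x80000001 + 0x7FFFFFFF) mod 2^32 = 0, so the r2 == 0 branch is taken. With correct linked-register tracking, that branch also pins r1, making r4 >> 63 equal 0 and r3 the constant 0x7FFFFFFF, which the verifier then refuses to add to the frame pointer, producing exactly the expected message. The pre-fix verifier could mis-propagate the 32-bit delta between the linked r2 and r4 and reach a different, unsound conclusion.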
@@ -287,6 +287,46 @@ l0_%=:	\
	: __clobber_all);
}

SEC("socket")
__description("MOV64SX, S8, unsigned range_check")
__success __retval(0)
__naked void mov64sx_s8_range_check(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x1;					\
	r0 += 0xfe;					\
	r0 = (s8)r0;					\
	if r0 < 0xfffffffffffffffe goto label_%=;	\
	r0 = 0;						\
	exit;						\
label_%=:						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("socket")
__description("MOV32SX, S8, unsigned range_check")
__success __retval(0)
__naked void mov32sx_s8_range_check(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	w0 &= 0x1;					\
	w0 += 0xfe;					\
	w0 = (s8)w0;					\
	if w0 < 0xfffffffe goto label_%=;		\
	r0 = 0;						\
	exit;						\
label_%=:						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
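A worked example of what the two tests pin down (plain userspace C; the casts model the MOVSX semantics). Before the sign extension r0 is 0xfe or 0xff, and (s8)0xfe = -2 and (s8)0xff = -1, i.e. 0xfffffffffffffffe/0xffffffffffffffff in 64 bits and 0xfffffffe/0xffffffff in 32 bits, so the unsigned "less than" checks above can never be true at runtime; the truncation bug in coerce_reg_to_size_sx() let the verifier believe otherwise:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (uint64_t bit = 0; bit <= 1; bit++) {
		uint64_t r0 = bit + 0xfe;             /* 0xfe or 0xff */
		uint64_t sx64 = (uint64_t)(int8_t)r0; /* MOV64SX, S8 */
		uint32_t sx32 = (uint32_t)(int8_t)r0; /* MOV32SX, S8 */

		assert(sx64 >= 0xfffffffffffffffeULL); /* branch never taken */
		assert(sx32 >= 0xfffffffeU);
	}
	return 0;
}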

#else

SEC("socket")
@@ -760,4 +760,71 @@ __naked void two_old_ids_one_cur_id(void)
	: __clobber_all);
}

SEC("socket")
/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
__flag(BPF_F_TEST_RND_HI32)
__success
/* This test was added because of a bug in verifier.c:sync_linked_regs():
 * upon range propagation it destroyed subreg_def marks for registers.
 * The subreg_def mark is used to decide whether zero extension instructions
 * are needed when a register is read. When BPF_F_TEST_RND_HI32 is set it
 * also causes generation of statements to randomize upper halves of
 * read registers.
 *
 * The test is written so as to return the upper half of a register
 * that is affected by range propagation and must have its subreg_def
 * preserved. This gives a return value of 0, and leads to an undefined
 * return value if the subreg_def mark is not preserved.
 */
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
__msg("4: (77) r1 >>= 32 ; R1_w=0")
__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
__msg("4: (77) r1 >>= 32 ; R1_w=0")
__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
/* Verify that statements to randomize the upper half of r1 have not been
 * generated.
 */
__xlated("call unknown")
__xlated("r0 &= 2147483647")
__xlated("w1 = w0")
/* This is how disasm.c prints BPF_ZEXT_REG at the moment; x86 and arm64
 * are the only CI archs that do not need zero extension for subregs.
 */
#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
__xlated("w1 = w1")
#endif
__xlated("if w0 < 0xa goto pc+0")
__xlated("r1 >>= 32")
__xlated("r0 = r1")
__xlated("exit")
__naked void linked_regs_and_subreg_def(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
	 * assign same IDs to registers.
	 */
	"r0 &= 0x7fffffff;"
	/* link w1 and w0 via ID */
	"w1 = w0;"
	/* 'if' statement propagates range info from w0 to w1,
	 * but should not affect w1->subreg_def property.
	 */
	"if w0 < 10 goto +0;"
	/* r1 is read here, on archs that require subreg zero
	 * extension this would cause zext patch generation.
	 */
	"r1 >>= 32;"
	"r0 = r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
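An analogy in plain C for the property the test protects (illustrative only): after a 32-bit subregister write, a 64-bit read must observe zeroed upper bits. The verifier's zext patching provides this on architectures whose 32-bit operations do not implicitly zero-extend, keyed off the subreg_def mark that sync_linked_regs() used to clobber:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t r0 = 0x7fffffffULL; /* after r0 &= 0x7fffffff */
	uint32_t w1 = (uint32_t)r0;  /* w1 = w0: 32-bit subreg write */
	uint64_t r1 = (uint64_t)w1;  /* 64-bit read; C zero-extends here */

	assert((r1 >> 32) == 0);     /* mirrors the test's r1 >>= 32 */
	return 0;
}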
@@ -367,7 +367,7 @@ int delete_module(const char *name, int flags)
	return syscall(__NR_delete_module, name, flags);
}

-int unload_bpf_testmod(bool verbose)
+int unload_module(const char *name, bool verbose)
{
	int ret, cnt = 0;

@@ -375,11 +375,11 @@ int unload_bpf_testmod(bool verbose)
		fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n");

	for (;;) {
-		ret = delete_module("bpf_testmod", 0);
+		ret = delete_module(name, 0);
		if (!ret || errno != EAGAIN)
			break;
		if (++cnt > 10000) {
-			fprintf(stdout, "Unload of bpf_testmod timed out\n");
+			fprintf(stdout, "Unload of %s timed out\n", name);
			break;
		}
		usleep(100);
@@ -388,41 +388,51 @@ int unload_bpf_testmod(bool verbose)
	if (ret) {
		if (errno == ENOENT) {
			if (verbose)
-				fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
+				fprintf(stdout, "%s.ko is already unloaded.\n", name);
			return -1;
		}
-		fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
+		fprintf(stdout, "Failed to unload %s.ko from kernel: %d\n", name, -errno);
		return -1;
	}
	if (verbose)
-		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
+		fprintf(stdout, "Successfully unloaded %s.ko.\n", name);
	return 0;
}

-int load_bpf_testmod(bool verbose)
+int load_module(const char *path, bool verbose)
{
	int fd;

	if (verbose)
-		fprintf(stdout, "Loading bpf_testmod.ko...\n");
+		fprintf(stdout, "Loading %s...\n", path);

-	fd = open("bpf_testmod.ko", O_RDONLY);
+	fd = open(path, O_RDONLY);
	if (fd < 0) {
-		fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
+		fprintf(stdout, "Can't find %s kernel module: %d\n", path, -errno);
		return -ENOENT;
	}
	if (finit_module(fd, "", 0)) {
-		fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
+		fprintf(stdout, "Failed to load %s into the kernel: %d\n", path, -errno);
		close(fd);
		return -EINVAL;
	}
	close(fd);

	if (verbose)
-		fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
+		fprintf(stdout, "Successfully loaded %s.\n", path);
	return 0;
}

int unload_bpf_testmod(bool verbose)
{
	return unload_module("bpf_testmod", verbose);
}

int load_bpf_testmod(bool verbose)
{
	return load_module("bpf_testmod.ko", verbose);
}
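A small usage sketch for the generalized pair (hypothetical helper; the real callers live in the individual tests). Note the asymmetry inherited from the originals: load_module() takes a .ko path while unload_module() takes the bare module name, mirroring delete_module():

static int with_module(const char *path, const char *name, int (*test)(void))
{
	int err = load_module(path, false);

	if (err)
		return err;
	err = test();               /* exercise the module's kfuncs, etc. */
	unload_module(name, false); /* bare name, not the .ko path */
	return err;
}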

/*
 * Trigger synchronize_rcu() in kernel.
 */
@@ -38,6 +38,8 @@ int unload_bpf_testmod(bool verbose);
int kern_sync_rcu(void);
int finit_module(int fd, const char *param_values, int flags);
int delete_module(const char *name, int flags);
int load_module(const char *path, bool verbose);
int unload_module(const char *name, bool verbose);

static inline __u64 get_time_ns(void)
{