Merge branch 'bpf-next/master' into for-next

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov 2025-01-09 07:42:08 -08:00
commit 6e90b3222a
130 changed files with 3887 additions and 1892 deletions


@ -267,6 +267,19 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
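/* Emit dst = src + imm. Use a single A64 ADD/SUB with an immediate when
* imm (or -imm) fits the 12-bit, optionally shifted, immediate field;
* otherwise materialize imm into tmp and emit a register-register ADD.
*/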
static inline void emit_a64_add_i(const bool is64, const int dst, const int src,
const int tmp, const s32 imm, struct jit_ctx *ctx)
{
if (is_addsub_imm(imm)) {
emit(A64_ADD_I(is64, dst, src, imm), ctx);
} else if (is_addsub_imm(-imm)) {
emit(A64_SUB_I(is64, dst, src, -imm), ctx);
} else {
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_ADD(is64, dst, src, tmp), ctx);
}
}
/*
* There are 3 types of AArch64 LDR/STR (immediate) instruction:
* Post-index, Pre-index, Unsigned offset.
@ -648,16 +661,13 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
const s16 off = insn->off;
u8 reg = dst;
if (off || arena) {
if (off) {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
reg = tmp;
}
if (arena) {
emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
reg = tmp;
}
if (off) {
emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
reg = tmp;
}
if (arena) {
emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
reg = tmp;
}
switch (insn->imm) {
@ -723,7 +733,7 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
const s32 imm = insn->imm;
const s16 off = insn->off;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 reg;
u8 reg = dst;
s32 jmp_offset;
if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
@ -732,11 +742,8 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
return -EINVAL;
}
if (!off) {
reg = dst;
} else {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
if (off) {
emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
reg = tmp;
}
@ -1146,14 +1153,7 @@ emit_bswap_uxt:
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
if (is_addsub_imm(imm)) {
emit(A64_ADD_I(is64, dst, dst, imm), ctx);
} else if (is_addsub_imm(-imm)) {
emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
} else {
emit_a64_mov_i(is64, tmp, imm, ctx);
emit(A64_ADD(is64, dst, dst, tmp), ctx);
}
emit_a64_add_i(is64, dst, dst, tmp, imm, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:


@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
/*
* The __bpf_map_get() and __btf_get_by_fd() functions parse a file
* descriptor and return a corresponding map or btf object.
* Their names are double underscored to emphasize the fact that they
* do not increase the refcnt. To also increase the refcnt, use the
* corresponding bpf_map_get() and btf_get_by_fd() functions.
*/
static inline struct bpf_map *__bpf_map_get(struct fd f)
{
if (fd_empty(f))
@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
return fd_file(f)->private_data;
}
static inline struct btf *__btf_get_by_fd(struct fd f)
{
if (fd_empty(f))
return ERR_PTR(-EBADF);
if (unlikely(fd_file(f)->f_op != &btf_fops))
return ERR_PTR(-EINVAL);
return fd_file(f)->private_data;
}
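For illustration, a minimal sketch of the intended calling pattern (the wrapper name is hypothetical; it mirrors the btf_get_by_fd() rework later in this commit):

static inline struct btf *btf_get_by_fd_sketch(int fd)
{
	struct btf *btf;

	CLASS(fd, f)(fd);
	/* borrowed reference only; no refcnt change */
	btf = __btf_get_by_fd(f);
	if (!IS_ERR(btf))
		/* take our own reference before the fd guard drops the file */
		refcount_inc(&btf->refcnt);
	return btf;
}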
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);


@ -233,6 +233,7 @@ enum bpf_stack_slot_type {
*/
STACK_DYNPTR,
STACK_ITER,
STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@ -254,8 +255,9 @@ struct bpf_reference_state {
* default to pointer reference on zero initialization of a state.
*/
enum ref_state_type {
REF_TYPE_PTR = 0,
REF_TYPE_LOCK,
REF_TYPE_PTR = 1,
REF_TYPE_IRQ = 2,
REF_TYPE_LOCK = 3,
} type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
@ -315,9 +317,6 @@ struct bpf_func_state {
u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
int active_locks;
struct bpf_reference_state *refs;
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
* (i.e. 8) bytes worth of stack memory.
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
@ -370,6 +369,8 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
/* Acquired reference states */
struct bpf_reference_state *refs;
/*
* 'branches' field is the number of branches left to explore:
* 0 - all possible paths from this state reached bpf_exit or
@ -419,9 +420,13 @@ struct bpf_verifier_state {
u32 insn_idx;
u32 curframe;
bool speculative;
u32 acquired_refs;
u32 active_locks;
u32 active_preempt_locks;
u32 active_irq_id;
bool active_rcu_lock;
u32 active_preempt_lock;
bool speculative;
/* If this state was ever pointed-to by other state's loop_entry field
* this flag would be set to true. Used to avoid freeing such states
* while they are still in use.
@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno);
#endif /* _LINUX_BPF_VERIFIER_H */


@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
return btf_type_is_int(t) || btf_type_is_enum(t);
}
static inline bool btf_type_is_fwd(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;


@ -1573,6 +1573,16 @@ union bpf_attr {
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
/* The fd_array_cnt can be used to pass the length of the
* fd_array array. In this case all the [map] file descriptors
* passed in this array will be bound to the program, even if
* the maps are not referenced directly. The functionality is
* similar to the BPF_PROG_BIND_MAP syscall, but maps can be
* used by the verifier during the program load. If provided,
* then the fd_array[0,...,fd_array_cnt-1] is expected to be
* contiguous.
*/
__u32 fd_array_cnt;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */


@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
INIT_LIST_HEAD(&arena->vma_list);
bpf_map_init_from_attr(&arena->map, attr);
range_tree_init(&arena->rt);
range_tree_set(&arena->rt, 0, attr->max_entries);
err = range_tree_set(&arena->rt, 0, attr->max_entries);
if (err) {
bpf_map_area_free(arena);
goto err;
}
mutex_init(&arena->lock);
return &arena->map;
@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
atomic_t mmap_count;
refcount_t mmap_count;
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
if (!vml)
return -ENOMEM;
atomic_set(&vml->mmap_count, 1);
refcount_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
{
struct vma_list *vml = vma->vm_private_data;
atomic_inc(&vml->mmap_count);
refcount_inc(&vml->mmap_count);
}
static void arena_vm_close(struct vm_area_struct *vma)
@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
struct vma_list *vml = vma->vm_private_data;
if (!atomic_dec_and_test(&vml->mmap_count))
if (!refcount_dec_and_test(&vml->mmap_count))
return;
guard(mutex)(&arena->lock);
/* update link list under lock */
@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
kfree(vml);
}
#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
{
struct bpf_map *map = vmf->vma->vm_file->private_data;


@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
u64 ret = 0;
void *val;
cant_migrate();
if (flags != 0)
return -EINVAL;
is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
array = container_of(map, struct bpf_array, map);
if (is_percpu)
migrate_disable();
for (i = 0; i < map->max_entries; i++) {
if (is_percpu)
val = this_cpu_ptr(array->pptrs[i]);
@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
break;
}
if (is_percpu)
migrate_enable();
return num_elems;
}


@ -15,22 +15,20 @@ static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
static void bpf_cgrp_storage_lock(void)
{
migrate_disable();
cant_migrate();
this_cpu_inc(bpf_cgrp_storage_busy);
}
static void bpf_cgrp_storage_unlock(void)
{
this_cpu_dec(bpf_cgrp_storage_busy);
migrate_enable();
}
static bool bpf_cgrp_storage_trylock(void)
{
migrate_disable();
cant_migrate();
if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
this_cpu_dec(bpf_cgrp_storage_busy);
migrate_enable();
return false;
}
return true;
@ -47,17 +45,18 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
struct bpf_local_storage *local_storage;
migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
if (!local_storage) {
rcu_read_unlock();
return;
}
if (!local_storage)
goto out;
bpf_cgrp_storage_lock();
bpf_local_storage_destroy(local_storage);
bpf_cgrp_storage_unlock();
out:
rcu_read_unlock();
migrate_enable();
}
static struct bpf_local_storage_data *


@ -62,16 +62,17 @@ void bpf_inode_storage_free(struct inode *inode)
if (!bsb)
return;
migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(bsb->storage);
if (!local_storage) {
rcu_read_unlock();
return;
}
if (!local_storage)
goto out;
bpf_local_storage_destroy(local_storage);
out:
rcu_read_unlock();
migrate_enable();
}
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)


@ -81,9 +81,7 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
return NULL;
if (smap->bpf_ma) {
migrate_disable();
selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
migrate_enable();
if (selem)
/* Keep the original bpf_map_kzalloc behavior
* before started using the bpf_mem_cache_alloc.
@ -174,17 +172,14 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
return;
}
if (smap) {
migrate_disable();
if (smap)
bpf_mem_cache_free(&smap->storage_ma, local_storage);
migrate_enable();
} else {
else
/* smap could be NULL if the selem that triggered
* this 'local_storage' creation had been long gone.
* In this case, directly do call_rcu().
*/
call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
}
}
/* rcu tasks trace callback for bpf_ma == false */
@ -217,7 +212,10 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
/* The bpf_local_storage_map_free will wait for rcu_barrier */
smap = rcu_dereference_check(SDATA(selem)->smap, 1);
migrate_disable();
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
migrate_enable();
bpf_mem_cache_raw_free(selem);
}
@ -256,9 +254,7 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
* bpf_mem_cache_free will be able to reuse selem
* immediately.
*/
migrate_disable();
bpf_mem_cache_free(&smap->selem_ma, selem);
migrate_enable();
return;
}
@ -497,15 +493,11 @@ int bpf_local_storage_alloc(void *owner,
if (err)
return err;
if (smap->bpf_ma) {
migrate_disable();
if (smap->bpf_ma)
storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
migrate_enable();
} else {
else
storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
gfp_flags | __GFP_NOWARN);
}
if (!storage) {
err = -ENOMEM;
goto uncharge;
@ -841,8 +833,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
smap->elem_size = offsetof(struct bpf_local_storage_elem,
sdata.data[attr->value_size]);
smap->bpf_ma = bpf_ma;
if (bpf_ma) {
/* In PREEMPT_RT, kmalloc(GFP_ATOMIC) is still not safe in a
* non-preemptible context. Thus, force all storages to use
* bpf_mem_alloc when CONFIG_PREEMPT_RT is enabled.
*/
smap->bpf_ma = IS_ENABLED(CONFIG_PREEMPT_RT) ? true : bpf_ma;
if (smap->bpf_ma) {
err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
if (err)
goto free_smap;
@ -898,15 +894,11 @@ void bpf_local_storage_map_free(struct bpf_map *map,
while ((selem = hlist_entry_safe(
rcu_dereference_raw(hlist_first_rcu(&b->list)),
struct bpf_local_storage_elem, map_node))) {
if (busy_counter) {
migrate_disable();
if (busy_counter)
this_cpu_inc(*busy_counter);
}
bpf_selem_unlink(selem, true);
if (busy_counter) {
if (busy_counter)
this_cpu_dec(*busy_counter);
migrate_enable();
}
cond_resched_rcu();
}
rcu_read_unlock();


@ -310,6 +310,20 @@ void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
kfree(arg_info);
}
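/* Return true if the member type @id is a pointer to 'struct module',
* whether BTF resolves it to the full struct or a forward declaration.
*/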
static bool is_module_member(const struct btf *btf, u32 id)
{
const struct btf_type *t;
t = btf_type_resolve_ptr(btf, id, NULL);
if (!t)
return false;
if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
return false;
return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
}
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
struct btf *btf,
struct bpf_verifier_log *log)
@ -389,6 +403,13 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
goto errout;
}
if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
st_ops->name);
err = -EOPNOTSUPP;
goto errout;
}
func_proto = btf_type_resolve_func_ptr(btf,
member->type,
NULL);


@ -24,22 +24,20 @@ static DEFINE_PER_CPU(int, bpf_task_storage_busy);
static void bpf_task_storage_lock(void)
{
migrate_disable();
cant_migrate();
this_cpu_inc(bpf_task_storage_busy);
}
static void bpf_task_storage_unlock(void)
{
this_cpu_dec(bpf_task_storage_busy);
migrate_enable();
}
static bool bpf_task_storage_trylock(void)
{
migrate_disable();
cant_migrate();
if (unlikely(this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
this_cpu_dec(bpf_task_storage_busy);
migrate_enable();
return false;
}
return true;
@ -72,18 +70,19 @@ void bpf_task_storage_free(struct task_struct *task)
{
struct bpf_local_storage *local_storage;
migrate_disable();
rcu_read_lock();
local_storage = rcu_dereference(task->bpf_storage);
if (!local_storage) {
rcu_read_unlock();
return;
}
if (!local_storage)
goto out;
bpf_task_storage_lock();
bpf_local_storage_destroy(local_storage);
bpf_task_storage_unlock();
out:
rcu_read_unlock();
migrate_enable();
}
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)


@ -498,11 +498,6 @@ bool btf_type_is_void(const struct btf_type *t)
return t == &btf_void;
}
static bool btf_type_is_fwd(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}
static bool btf_type_is_datasec(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
@ -7887,14 +7882,9 @@ struct btf *btf_get_by_fd(int fd)
struct btf *btf;
CLASS(fd, f)(fd);
if (fd_empty(f))
return ERR_PTR(-EBADF);
if (fd_file(f)->f_op != &btf_fops)
return ERR_PTR(-EINVAL);
btf = fd_file(f)->private_data;
refcount_inc(&btf->refcnt);
btf = __btf_get_by_fd(f);
if (!IS_ERR(btf))
refcount_inc(&btf->refcnt);
return btf;
}


@ -91,9 +91,7 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
if (!refcount_dec_and_test(&cpumask->usage))
return;
migrate_disable();
bpf_mem_cache_free_rcu(&bpf_cpumask_ma, cpumask);
migrate_enable();
}
__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask)


@ -897,11 +897,9 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
check_and_free_fields(htab, l);
migrate_disable();
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
bpf_mem_cache_free(&htab->ma, l);
migrate_enable();
}
static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
@ -1502,10 +1500,9 @@ static void delete_all_elements(struct bpf_htab *htab)
{
int i;
/* It's called from a worker thread, so disable migration here,
* since bpf_mem_cache_free() relies on that.
/* It's called from a worker thread with migration disabled, so it is
* OK to invoke bpf_mem_cache_free() directly.
*/
migrate_disable();
for (i = 0; i < htab->n_buckets; i++) {
struct hlist_nulls_head *head = select_bucket(htab, i);
struct hlist_nulls_node *n;
@ -1517,7 +1514,6 @@ static void delete_all_elements(struct bpf_htab *htab)
}
cond_resched();
}
migrate_enable();
}
static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
@ -2208,17 +2204,18 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
bool is_percpu;
u64 ret = 0;
cant_migrate();
if (flags != 0)
return -EINVAL;
is_percpu = htab_is_percpu(htab);
roundup_key_size = round_up(map->key_size, 8);
/* disable migration so percpu value prepared here will be the
* same as the one seen by the bpf program with bpf_map_lookup_elem().
/* migration has been disabled, so the percpu value prepared here will be
* the same as the one seen by the bpf program with
* bpf_map_lookup_elem().
*/
if (is_percpu)
migrate_disable();
for (i = 0; i < htab->n_buckets; i++) {
b = &htab->buckets[i];
rcu_read_lock();
@ -2244,8 +2241,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
rcu_read_unlock();
}
out:
if (is_percpu)
migrate_enable();
return num_elems;
}


@ -2066,9 +2066,7 @@ unlock:
/* The contained type can also have resources, including a
* bpf_list_head which needs to be freed.
*/
migrate_disable();
__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
migrate_enable();
}
}
@ -2105,9 +2103,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
obj -= field->graph_root.node_offset;
migrate_disable();
__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
migrate_enable();
}
}
@ -3057,6 +3053,21 @@ __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user
return ret + 1;
}
/* Keep unsigned long in the prototype so that the kfunc is usable when emitted
* to vmlinux.h in BPF programs directly. Note that while in a BPF prog the
* unsigned long always points to an 8-byte region on the stack, the kernel may
* only read and write the low 4 bytes on 32-bit.
*/
__bpf_kfunc void bpf_local_irq_save(unsigned long *flags__irq_flag)
{
local_irq_save(*flags__irq_flag);
}
__bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag)
{
local_irq_restore(*flags__irq_flag);
}
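For illustration, a minimal BPF-side sketch of the expected save/restore pairing (the section name and extern declarations are assumptions; the verifier tracks the on-stack flags slot via STACK_IRQ_FLAG):

extern void bpf_local_irq_save(unsigned long *flags__irq_flag) __ksym;
extern void bpf_local_irq_restore(unsigned long *flags__irq_flag) __ksym;

SEC("tc")
int irq_flags_example(struct __sk_buff *skb)
{
	unsigned long flags;

	bpf_local_irq_save(&flags);
	/* IRQ-disabled critical section; leaking the saved flags
	 * without restoring them fails verification.
	 */
	bpf_local_irq_restore(&flags);
	return 0;
}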
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(generic_btf_ids)
@ -3089,7 +3100,9 @@ BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_from_vpid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
#ifdef CONFIG_BPF_EVENTS
BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS)
#endif
BTF_KFUNCS_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
@ -3135,7 +3148,9 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
#ifdef CONFIG_NET
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
#endif
BTF_ID_FLAGS(func, bpf_wq_init)
BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
BTF_ID_FLAGS(func, bpf_wq_start)
@ -3149,6 +3164,8 @@ BTF_ID_FLAGS(func, bpf_get_kmem_cache)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_new, KF_ITER_NEW | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_local_irq_save)
BTF_ID_FLAGS(func, bpf_local_irq_restore)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {


@ -537,6 +537,7 @@ static char slot_type_char[] = {
[STACK_ZERO] = '0',
[STACK_DYNPTR] = 'd',
[STACK_ITER] = 'i',
[STACK_IRQ_FLAG] = 'f'
};
static void print_liveness(struct bpf_verifier_env *env,
@ -753,9 +754,10 @@ static void print_reg_state(struct bpf_verifier_env *env,
verbose(env, ")");
}
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
bool print_all)
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno, bool print_all)
{
const struct bpf_func_state *state = vstate->frame[frameno];
const struct bpf_reg_state *reg;
int i;
@ -843,11 +845,11 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_st
break;
}
}
if (state->acquired_refs && state->refs[0].id) {
verbose(env, " refs=%d", state->refs[0].id);
for (i = 1; i < state->acquired_refs; i++)
if (state->refs[i].id)
verbose(env, ",%d", state->refs[i].id);
if (vstate->acquired_refs && vstate->refs[0].id) {
verbose(env, " refs=%d", vstate->refs[0].id);
for (i = 1; i < vstate->acquired_refs; i++)
if (vstate->refs[i].id)
verbose(env, ",%d", vstate->refs[i].id);
}
if (state->in_callback_fn)
verbose(env, " cb");
@ -864,7 +866,8 @@ static inline u32 vlog_alignment(u32 pos)
BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno)
{
if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
/* remove new line character */
@ -873,5 +876,5 @@ void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state
} else {
verbose(env, "%d:", env->insn_idx);
}
print_verifier_state(env, state, false);
print_verifier_state(env, vstate, frameno, false);
}


@ -289,16 +289,11 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
}
static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie,
const void *value,
bool disable_migration)
const void *value)
{
struct lpm_trie_node *node;
if (disable_migration)
migrate_disable();
node = bpf_mem_cache_alloc(&trie->ma);
if (disable_migration)
migrate_enable();
if (!node)
return NULL;
@ -342,10 +337,8 @@ static long trie_update_elem(struct bpf_map *map,
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
/* Allocate and fill a new node. Need to disable migration before
* invoking bpf_mem_cache_alloc().
*/
new_node = lpm_trie_node_alloc(trie, value, true);
/* Allocate and fill a new node */
new_node = lpm_trie_node_alloc(trie, value);
if (!new_node)
return -ENOMEM;
@ -425,8 +418,7 @@ static long trie_update_elem(struct bpf_map *map,
goto out;
}
/* migration is disabled within the locked scope */
im_node = lpm_trie_node_alloc(trie, NULL, false);
im_node = lpm_trie_node_alloc(trie, NULL);
if (!im_node) {
trie->n_entries--;
ret = -ENOMEM;
@ -452,11 +444,9 @@ static long trie_update_elem(struct bpf_map *map,
out:
raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
migrate_disable();
if (ret)
bpf_mem_cache_free(&trie->ma, new_node);
bpf_mem_cache_free_rcu(&trie->ma, free_node);
migrate_enable();
return ret;
}
@ -555,10 +545,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
out:
raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
migrate_disable();
bpf_mem_cache_free_rcu(&trie->ma, free_parent);
bpf_mem_cache_free_rcu(&trie->ma, free_node);
migrate_enable();
return ret;
}


@ -259,9 +259,7 @@ void range_tree_destroy(struct range_tree *rt)
while ((rn = range_it_iter_first(rt, 0, -1U))) {
range_it_remove(rn, rt);
migrate_disable();
bpf_mem_free(&bpf_global_ma, rn);
migrate_enable();
}
}


@ -796,11 +796,9 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
if (!btf_is_kernel(field->kptr.btf)) {
pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
field->kptr.btf_id);
migrate_disable();
__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
pointee_struct_meta->record : NULL,
fields[i].type == BPF_KPTR_PERCPU);
migrate_enable();
} else {
field->kptr.dtor(xchgd_field);
}
@ -835,8 +833,14 @@ static void bpf_map_free(struct bpf_map *map)
struct btf_record *rec = map->record;
struct btf *btf = map->btf;
/* implementation dependent freeing */
/* implementation dependent freeing. Disabling migration to simplify
* the free of values or special fields allocated from bpf memory
* allocator.
*/
migrate_disable();
map->ops->map_free(map);
migrate_enable();
/* Delay freeing of btf_record for maps, as map_free
* callback usually needs access to them. It is better to do it here
* than require each callback to do the free itself manually.
@ -2730,7 +2734,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{

File diff suppressed because it is too large


@ -357,17 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
.arg3_type = ARG_CONST_SIZE,
};
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
if (!capable(CAP_SYS_ADMIN))
return NULL;
pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
current->comm, task_pid_nr(current));
return &bpf_probe_write_user_proto;
}
#define MAX_TRACE_PRINTK_VARARGS 3
#define BPF_TRACE_PRINTK_SIZE 1024
@ -1444,6 +1433,8 @@ late_initcall(bpf_key_sig_kfuncs_init);
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
const struct bpf_func_proto *func_proto;
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
@ -1485,9 +1476,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_perf_event_read_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_probe_write_user:
return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
NULL : bpf_get_probe_write_proto();
case BPF_FUNC_probe_read_user:
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
@ -1566,7 +1554,22 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:
return bpf_base_func_proto(func_id, prog);
break;
}
func_proto = bpf_base_func_proto(func_id, prog);
if (func_proto)
return func_proto;
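/* The remaining helpers are privileged: they require CAP_SYS_ADMIN,
* either directly or via the program's BPF token.
*/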
if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
return NULL;
switch (func_id) {
case BPF_FUNC_probe_write_user:
return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
NULL : &bpf_probe_write_user_proto;
default:
return NULL;
}
}
@ -2242,6 +2245,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
{
struct bpf_prog_array *old_array;
struct bpf_prog_array *new_array;
struct bpf_prog *prog = NULL;
int ret;
mutex_lock(&bpf_event_mutex);
@ -2262,18 +2266,22 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
}
put:
/*
* It could be that the bpf_prog is not sleepable (and will be freed
* via normal RCU), but is called from a point that supports sleepable
* programs and uses tasks-trace-RCU.
*/
synchronize_rcu_tasks_trace();
bpf_prog_put(event->prog);
prog = event->prog;
event->prog = NULL;
unlock:
mutex_unlock(&bpf_event_mutex);
if (prog) {
/*
* It could be that the bpf_prog is not sleepable (and will be freed
* via normal RCU), but is called from a point that supports sleepable
* programs and uses tasks-trace-RCU.
*/
synchronize_rcu_tasks_trace();
bpf_prog_put(prog);
}
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
@ -2794,7 +2802,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
bpf_prog_inc_misses_counter(link->link.prog);
err = 0;
err = 1;
goto out;
}


@ -478,7 +478,7 @@ static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
* to overflow the field size of the native instruction, triggering
* a branch conversion mechanism in some JITs.
*/
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu32)
{
struct bpf_insn *insns;
int len = S16_MAX + 5;
@ -501,7 +501,7 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
};
int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
if (i & 1)
if ((i & 1) || alu32)
insns[i++] = BPF_ALU32_REG(op, R0, R1);
else
insns[i++] = BPF_ALU64_REG(op, R0, R1);
@ -516,27 +516,47 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
}
/* Branch taken by runtime decision */
static int bpf_fill_max_jmp_taken_32(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 1, true);
}
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
return __bpf_fill_max_jmp(self, BPF_JEQ, 1, false);
}
/* Branch not taken by runtime decision */
static int bpf_fill_max_jmp_not_taken_32(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 0, true);
}
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
return __bpf_fill_max_jmp(self, BPF_JEQ, 0, false);
}
/* Branch always taken, known at JIT time */
static int bpf_fill_max_jmp_always_taken_32(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JGE, 0, true);
}
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JGE, 0);
return __bpf_fill_max_jmp(self, BPF_JGE, 0, false);
}
/* Branch never taken, known at JIT time */
static int bpf_fill_max_jmp_never_taken_32(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JLT, 0, true);
}
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JLT, 0);
return __bpf_fill_max_jmp(self, BPF_JLT, 0, false);
}
/* ALU result computation used in tests */
@ -14233,6 +14253,38 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
},
/* Conditional branch conversions */
{
"Long conditional jump: taken at runtime (32 bits)",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_max_jmp_taken_32,
},
{
"Long conditional jump: not taken at runtime (32 bits)",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 2 } },
.fill_helper = bpf_fill_max_jmp_not_taken_32,
},
{
"Long conditional jump: always taken, known at JIT time (32 bits)",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_max_jmp_always_taken_32,
},
{
"Long conditional jump: never taken, known at JIT time (32 bits)",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 2 } },
.fill_helper = bpf_fill_max_jmp_never_taken_32,
},
{
"Long conditional jump: taken at runtime",
{ },


@ -1018,6 +1018,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_CGROUP_SKB:
is_direct_pkt_access = true;
break;
default:


@ -50,15 +50,16 @@ void bpf_sk_storage_free(struct sock *sk)
{
struct bpf_local_storage *sk_storage;
migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage) {
rcu_read_unlock();
return;
}
if (!sk_storage)
goto out;
bpf_local_storage_destroy(sk_storage);
out:
rcu_read_unlock();
migrate_enable();
}
static void bpf_sk_storage_map_free(struct bpf_map *map)
@ -160,6 +161,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
migrate_disable();
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
@ -212,6 +214,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
out:
rcu_read_unlock();
migrate_enable();
/* In case of an error, don't free anything explicitly here, the
* caller is responsible to call bpf_sk_storage_free.


@ -123,7 +123,7 @@ always-y += ibumad_kern.o
always-y += hbm_out_kern.o
always-y += hbm_edt_kern.o
TPROGS_CFLAGS = $(TPROGS_USER_CFLAGS)
COMMON_CFLAGS = $(TPROGS_USER_CFLAGS)
TPROGS_LDFLAGS = $(TPROGS_USER_LDFLAGS)
ifeq ($(ARCH), arm)


@ -63,7 +63,6 @@ SEC("tc_mark")
int _tc_mark(struct __sk_buff *ctx)
{
void *data = (void *)(unsigned long)ctx->data;
void *data_end = (void *)(unsigned long)ctx->data_end;
void *data_meta = (void *)(unsigned long)ctx->data_meta;
struct meta_info *meta = data_meta;


@ -283,7 +283,11 @@ vmlinux_link vmlinux
# fill in BTF IDs
if is_enabled CONFIG_DEBUG_INFO_BTF; then
info BTFIDS vmlinux
${RESOLVE_BTFIDS} vmlinux
RESOLVE_BTFIDS_ARGS=""
if is_enabled CONFIG_WERROR; then
RESOLVE_BTFIDS_ARGS=" --fatal_warnings "
fi
${RESOLVE_BTFIDS} ${RESOLVE_BTFIDS_ARGS} vmlinux
fi
mksysmap vmlinux System.map


@ -13,7 +13,6 @@ static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = {
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
LSM_HOOK_INIT(task_free, bpf_task_storage_free),
};
static const struct lsm_id bpf_lsmid = {


@ -24,7 +24,7 @@ BTF COMMANDS
=============
| **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*]
| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*]
| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] [**root_id** *ROOT_ID*]
| **bpftool** **btf help**
|
| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
@ -43,7 +43,7 @@ bpftool btf { show | list } [id *BTF_ID*]
that hold open file descriptors (FDs) against BTF objects. On such kernels
bpftool will automatically emit this information as well.
bpftool btf dump *BTF_SRC*
bpftool btf dump *BTF_SRC* [format *FORMAT*] [root_id *ROOT_ID*]
Dump BTF entries from a given *BTF_SRC*.
When **id** is specified, BTF object with that ID will be loaded and all
@ -67,6 +67,11 @@ bpftool btf dump *BTF_SRC*
formatting, the output is sorted by default. Use the **unsorted** option
to avoid sorting the output.
**root_id** option can be used to filter a dump to a single type and all
its dependent types. It cannot be used with any other types of filtering
(such as the "key", "value", or "kv" arguments when dumping BTF for a map).
It can be passed multiple times to dump multiple types.
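For example, to dump only type 42 and its dependent types from a BTF file (the ID and path here are placeholders): **bpftool btf dump file /sys/kernel/btf/vmlinux format c root_id 42**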
bpftool btf help
Print short help message.


@ -930,19 +930,24 @@ _bpftool()
format)
COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
;;
root_id)
return 0;
;;
c)
COMPREPLY=( $( compgen -W "unsorted" -- "$cur" ) )
COMPREPLY=( $( compgen -W "unsorted root_id" -- "$cur" ) )
;;
*)
# emit extra options
case ${words[3]} in
id|file)
COMPREPLY=( $( compgen -W "root_id" -- "$cur" ) )
_bpftool_once_attr 'format'
;;
map|prog)
if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then
COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) )
fi
COMPREPLY=( $( compgen -W "root_id" -- "$cur" ) )
_bpftool_once_attr 'format'
;;
*)


@ -27,6 +27,8 @@
#define KFUNC_DECL_TAG "bpf_kfunc"
#define FASTCALL_DECL_TAG "bpf_fastcall"
#define MAX_ROOT_IDS 16
static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_UNKN] = "UNKNOWN",
[BTF_KIND_INT] = "INT",
@ -880,12 +882,14 @@ static int do_dump(int argc, char **argv)
{
bool dump_c = false, sort_dump_c = true;
struct btf *btf = NULL, *base = NULL;
__u32 root_type_ids[2];
__u32 root_type_ids[MAX_ROOT_IDS];
bool have_id_filtering;
int root_type_cnt = 0;
__u32 btf_id = -1;
const char *src;
int fd = -1;
int err = 0;
int i;
if (!REQ_ARGS(2)) {
usage();
@ -973,6 +977,8 @@ static int do_dump(int argc, char **argv)
goto done;
}
have_id_filtering = !!root_type_cnt;
while (argc) {
if (is_prefix(*argv, "format")) {
NEXT_ARG();
@ -992,6 +998,36 @@ static int do_dump(int argc, char **argv)
goto done;
}
NEXT_ARG();
} else if (is_prefix(*argv, "root_id")) {
__u32 root_id;
char *end;
if (have_id_filtering) {
p_err("cannot use root_id with other type filtering");
err = -EINVAL;
goto done;
} else if (root_type_cnt == MAX_ROOT_IDS) {
p_err("only %d root_id are supported", MAX_ROOT_IDS);
err = -E2BIG;
goto done;
}
NEXT_ARG();
root_id = strtoul(*argv, &end, 0);
if (*end) {
err = -1;
p_err("can't parse %s as root ID", *argv);
goto done;
}
for (i = 0; i < root_type_cnt; i++) {
if (root_type_ids[i] == root_id) {
err = -EINVAL;
p_err("duplicate root_id %d supplied", root_id);
goto done;
}
}
root_type_ids[root_type_cnt++] = root_id;
NEXT_ARG();
} else if (is_prefix(*argv, "unsorted")) {
sort_dump_c = false;
NEXT_ARG();
@ -1017,6 +1053,17 @@ static int do_dump(int argc, char **argv)
}
}
/* Invalid root IDs cause half-emitted boilerplate and then an unclean
* exit. It's an ugly user experience, so handle the common error here.
*/
for (i = 0; i < root_type_cnt; i++) {
if (root_type_ids[i] >= btf__type_cnt(btf)) {
err = -EINVAL;
p_err("invalid root ID: %u", root_type_ids[i]);
goto done;
}
}
if (dump_c) {
if (json_output) {
p_err("JSON output for C-syntax dump is not supported");
@ -1391,7 +1438,7 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %1$s %2$s { show | list } [id BTF_ID]\n"
" %1$s %2$s dump BTF_SRC [format FORMAT]\n"
" %1$s %2$s dump BTF_SRC [format FORMAT] [root_id ROOT_ID]\n"
" %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"


@ -885,6 +885,28 @@ probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
"V3_ISA_EXTENSION");
}
/*
* Probe for the v4 instruction set extension introduced in commit 1f9a1ea821ff
* ("bpf: Support new sign-extension load insns").
*/
static void
probe_v4_isa_extension(const char *define_prefix, __u32 ifindex)
{
struct bpf_insn insns[5] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 1, 1),
BPF_JMP32_A(1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN()
};
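/* BPF_JMP32_A (gotol) above is a v4-only instruction, so this probe
* program loads successfully only on kernels with v4 support.
*/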
probe_misc_feature(insns, ARRAY_SIZE(insns),
define_prefix, ifindex,
"have_v4_isa_extension",
"ISA extension v4",
"V4_ISA_EXTENSION");
}
static void
section_system_config(enum probe_component target, const char *define_prefix)
{
@ -1029,6 +1051,7 @@ static void section_misc(const char *define_prefix, __u32 ifindex)
probe_bounded_loops(define_prefix, ifindex);
probe_v2_isa_extension(define_prefix, ifindex);
probe_v3_isa_extension(define_prefix, ifindex);
probe_v4_isa_extension(define_prefix, ifindex);
print_end_section();
}


@ -141,6 +141,7 @@ struct object {
};
static int verbose;
static int warnings;
static int eprintf(int level, int var, const char *fmt, ...)
{
@ -604,6 +605,7 @@ static int symbols_resolve(struct object *obj)
if (id->id) {
pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
str, id->id, type_id, id->id);
warnings++;
} else {
id->id = type_id;
(*nr)--;
@ -625,8 +627,10 @@ static int id_patch(struct object *obj, struct btf_id *id)
int i;
/* For set, set8, id->id may be 0 */
if (!id->id && !id->is_set && !id->is_set8)
if (!id->id && !id->is_set && !id->is_set8) {
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
warnings++;
}
for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
@ -782,6 +786,7 @@ int main(int argc, const char **argv)
.funcs = RB_ROOT,
.sets = RB_ROOT,
};
bool fatal_warnings = false;
struct option btfid_options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show errors, etc)"),
@ -789,6 +794,8 @@ int main(int argc, const char **argv)
"BTF data"),
OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"path of file providing base BTF"),
OPT_BOOLEAN(0, "fatal_warnings", &fatal_warnings,
"turn warnings into errors"),
OPT_END()
};
int err = -1;
@ -823,7 +830,8 @@ int main(int argc, const char **argv)
if (symbols_patch(&obj))
goto out;
err = 0;
if (!(fatal_warnings && warnings))
err = 0;
out:
if (obj.efile.elf) {
elf_end(obj.efile.elf);


@ -273,6 +273,16 @@
.off = OFF, \
.imm = 0 })
/* Unconditional jumps, gotol pc + imm32 */
#define BPF_JMP32_A(IMM) \
((struct bpf_insn) { \
.code = BPF_JMP32 | BPF_JA, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
/* Function call */
#define BPF_EMIT_CALL(FUNC) \


@ -1573,6 +1573,16 @@ union bpf_attr {
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
/* The fd_array_cnt can be used to pass the length of the
* fd_array array. In this case all the [map] file descriptors
* passed in this array will be bound to the program, even if
* the maps are not referenced directly. The functionality is
* similar to the BPF_PROG_BIND_MAP syscall, but maps can be
* used by the verifier during the program load. If provided,
* then the fd_array[0,...,fd_array_cnt-1] is expected to be
* continuous.
*/
__u32 fd_array_cnt;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */


@ -238,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
const size_t attr_sz = offsetofend(union bpf_attr, fd_array_cnt);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@ -311,6 +311,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
attr.fd_array_cnt = OPTS_GET(opts, fd_array_cnt, 0);
if (log_level) {
attr.log_buf = ptr_to_u64(log_buf);


@ -107,9 +107,12 @@ struct bpf_prog_load_opts {
*/
__u32 log_true_size;
__u32 token_fd;
/* if set, provides the length of fd_array */
__u32 fd_array_cnt;
size_t :0;
};
#define bpf_prog_load_opts__last_field token_fd
#define bpf_prog_load_opts__last_field fd_array_cnt
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
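For illustration, a hedged sketch of binding a map via the new option (the program type, name, license, and function name are arbitrary placeholders):

int load_prog_with_bound_map(const struct bpf_insn *insns, size_t insn_cnt, int map_fd)
{
	int fd_array[1] = { map_fd };
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.fd_array = fd_array,
		.fd_array_cnt = 1,	/* bind fd_array[0] to the program at load time */
	);

	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "fd_array_sketch", "GPL",
			     insns, insn_cnt, &opts);
}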


@ -283,7 +283,7 @@ static int btf_parse_str_sec(struct btf *btf)
return -EINVAL;
}
if (!btf->base_btf && start[0]) {
pr_debug("Invalid BTF string section\n");
pr_debug("Malformed BTF string section, did you forget to provide base BTF?\n");
return -EINVAL;
}
return 0;


@ -1731,12 +1731,24 @@ static int sys_memfd_create(const char *name, unsigned flags)
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif
#ifndef MFD_NOEXEC_SEAL
#define MFD_NOEXEC_SEAL 0x0008U
#endif
static int create_placeholder_fd(void)
{
unsigned int flags = MFD_CLOEXEC | MFD_NOEXEC_SEAL;
const char *name = "libbpf-placeholder-fd";
int fd;
fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
fd = ensure_good_fd(sys_memfd_create(name, flags));
if (fd >= 0)
return fd;
else if (errno != EINVAL)
return -errno;
/* Possibly running on kernel without MFD_NOEXEC_SEAL */
fd = ensure_good_fd(sys_memfd_create(name, flags & ~MFD_NOEXEC_SEAL));
if (fd < 0)
return -errno;
return fd;


@ -1796,9 +1796,14 @@ struct bpf_linker_file_opts {
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
const char *filename,
const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
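For illustration, a minimal sketch combining the new entry points (the function name is hypothetical; error handling is reduced to essentials):

static int link_obj_buf_to_fd(int out_fd, void *obj_buf, size_t obj_sz)
{
	struct bpf_linker *linker;
	int err;

	linker = bpf_linker__new_fd(out_fd, NULL);	/* linker borrows out_fd */
	if (!linker)
		return -errno;

	err = bpf_linker__add_buf(linker, obj_buf, obj_sz, NULL);
	if (!err)
		err = bpf_linker__finalize(linker);

	bpf_linker__free(linker);	/* does not close the borrowed out_fd */
	return err;
}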


@ -432,4 +432,8 @@ LIBBPF_1.5.0 {
} LIBBPF_1.4.0;
LIBBPF_1.6.0 {
global:
bpf_linker__add_buf;
bpf_linker__add_fd;
bpf_linker__new_fd;
} LIBBPF_1.5.0;


@ -4,6 +4,10 @@
*
* Copyright (c) 2021 Facebook
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
@ -16,6 +20,7 @@
#include <elf.h>
#include <libelf.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "libbpf.h"
#include "btf.h"
#include "libbpf_internal.h"
@ -152,15 +157,19 @@ struct bpf_linker {
/* global (including extern) ELF symbols */
int glob_sym_cnt;
struct glob_sym *glob_syms;
bool fd_is_owned;
};
#define pr_warn_elf(fmt, ...) \
libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
static int init_output_elf(struct bpf_linker *linker, const char *file);
static int init_output_elf(struct bpf_linker *linker);
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
const char *filename);
static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
@ -191,7 +200,7 @@ void bpf_linker__free(struct bpf_linker *linker)
if (linker->elf)
elf_end(linker->elf);
if (linker->fd >= 0)
if (linker->fd >= 0 && linker->fd_is_owned)
close(linker->fd);
strset__free(linker->strtab_strs);
@ -233,9 +242,63 @@ struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts
if (!linker)
return errno = ENOMEM, NULL;
linker->fd = -1;
linker->filename = strdup(filename);
if (!linker->filename) {
err = -ENOMEM;
goto err_out;
}
err = init_output_elf(linker, filename);
linker->fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (linker->fd < 0) {
err = -errno;
pr_warn("failed to create '%s': %d\n", filename, err);
goto err_out;
}
linker->fd_is_owned = true;
err = init_output_elf(linker);
if (err)
goto err_out;
return linker;
err_out:
bpf_linker__free(linker);
return errno = -err, NULL;
}
struct bpf_linker *bpf_linker__new_fd(int fd, struct bpf_linker_opts *opts)
{
struct bpf_linker *linker;
char filename[32];
int err;
if (fd < 0)
return errno = EINVAL, NULL;
if (!OPTS_VALID(opts, bpf_linker_opts))
return errno = EINVAL, NULL;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn_elf("libelf initialization failed");
return errno = EINVAL, NULL;
}
linker = calloc(1, sizeof(*linker));
if (!linker)
return errno = ENOMEM, NULL;
snprintf(filename, sizeof(filename), "fd:%d", fd);
linker->filename = strdup(filename);
if (!linker->filename) {
err = -ENOMEM;
goto err_out;
}
linker->fd = fd;
linker->fd_is_owned = false;
err = init_output_elf(linker);
if (err)
goto err_out;
@ -294,23 +357,12 @@ static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
return sym;
}
static int init_output_elf(struct bpf_linker *linker, const char *file)
static int init_output_elf(struct bpf_linker *linker)
{
int err, str_off;
Elf64_Sym *init_sym;
struct dst_sec *sec;
linker->filename = strdup(file);
if (!linker->filename)
return -ENOMEM;
linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
if (linker->fd < 0) {
err = -errno;
pr_warn("failed to create '%s': %s\n", file, errstr(err));
return err;
}
linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
if (!linker->elf) {
pr_warn_elf("failed to create ELF object");
@ -436,19 +488,16 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts)
static int bpf_linker_add_file(struct bpf_linker *linker, int fd,
const char *filename)
{
struct src_obj obj = {};
int err = 0;
if (!OPTS_VALID(opts, bpf_linker_file_opts))
return libbpf_err(-EINVAL);
obj.filename = filename;
obj.fd = fd;
if (!linker->elf)
return libbpf_err(-EINVAL);
err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_load_obj_file(linker, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@ -463,12 +512,91 @@ int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
free(obj.sym_map);
if (obj.elf)
elf_end(obj.elf);
if (obj.fd >= 0)
close(obj.fd);
return err;
}
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts)
{
int fd, err;
if (!OPTS_VALID(opts, bpf_linker_file_opts))
return libbpf_err(-EINVAL);
if (!linker->elf)
return libbpf_err(-EINVAL);
fd = open(filename, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
return libbpf_err(err);
}
err = bpf_linker_add_file(linker, fd, filename);
close(fd);
return libbpf_err(err);
}
int bpf_linker__add_fd(struct bpf_linker *linker, int fd,
const struct bpf_linker_file_opts *opts)
{
char filename[32];
int err;
if (!OPTS_VALID(opts, bpf_linker_file_opts))
return libbpf_err(-EINVAL);
if (!linker->elf)
return libbpf_err(-EINVAL);
if (fd < 0)
return libbpf_err(-EINVAL);
snprintf(filename, sizeof(filename), "fd:%d", fd);
err = bpf_linker_add_file(linker, fd, filename);
return libbpf_err(err);
}
int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz,
const struct bpf_linker_file_opts *opts)
{
char filename[32];
int fd, written, ret;
if (!OPTS_VALID(opts, bpf_linker_file_opts))
return libbpf_err(-EINVAL);
if (!linker->elf)
return libbpf_err(-EINVAL);
snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz);
fd = memfd_create(filename, 0);
if (fd < 0) {
ret = -errno;
pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret));
return libbpf_err(ret);
}
written = 0;
while (written < buf_sz) {
ret = write(fd, (char *)buf + written, buf_sz - written);
if (ret < 0) {
ret = -errno;
pr_warn("failed to write '%s': %s\n", filename, errstr(ret));
goto err_out;
}
written += ret;
}
ret = bpf_linker_add_file(linker, fd, filename);
err_out:
close(fd);
return libbpf_err(ret);
}
static bool is_dwarf_sec_name(const char *name)
{
/* approximation, but the actual list is too long */
@ -534,8 +662,7 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
static int linker_load_obj_file(struct bpf_linker *linker,
struct src_obj *obj)
{
int err = 0;
@ -554,36 +681,26 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
#error "Unknown __BYTE_ORDER__"
#endif
pr_debug("linker: adding object file '%s'...\n", filename);
pr_debug("linker: adding object file '%s'...\n", obj->filename);
obj->filename = filename;
obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
if (obj->fd < 0) {
err = -errno;
pr_warn("failed to open file '%s': %s\n", filename, errstr(err));
return err;
}
obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
if (!obj->elf) {
err = -errno;
pr_warn_elf("failed to parse ELF file '%s'", filename);
return err;
pr_warn_elf("failed to parse ELF file '%s'", obj->filename);
return -EINVAL;
}
/* Sanity check ELF file high-level properties */
ehdr = elf64_getehdr(obj->elf);
if (!ehdr) {
err = -errno;
pr_warn_elf("failed to get ELF header for %s", filename);
return err;
pr_warn_elf("failed to get ELF header for %s", obj->filename);
return -EINVAL;
}
/* Linker output endianness set by first input object */
obj_byteorder = ehdr->e_ident[EI_DATA];
if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
err = -EOPNOTSUPP;
pr_warn("unknown byte order of ELF file %s\n", filename);
pr_warn("unknown byte order of ELF file %s\n", obj->filename);
return err;
}
if (link_byteorder == ELFDATANONE) {
@ -593,7 +710,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
obj_byteorder == ELFDATA2MSB ? "big" : "little");
} else if (link_byteorder != obj_byteorder) {
err = -EOPNOTSUPP;
pr_warn("byte order mismatch with ELF file %s\n", filename);
pr_warn("byte order mismatch with ELF file %s\n", obj->filename);
return err;
}
@ -601,14 +718,13 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
|| ehdr->e_machine != EM_BPF
|| ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
err = -EOPNOTSUPP;
pr_warn_elf("unsupported kind of ELF file %s", filename);
pr_warn_elf("unsupported kind of ELF file %s", obj->filename);
return err;
}
if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
err = -errno;
pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
return err;
pr_warn_elf("failed to get SHSTRTAB section index for %s", obj->filename);
return -EINVAL;
}
scn = NULL;
@ -618,26 +734,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
shdr = elf64_getshdr(scn);
if (!shdr) {
err = -errno;
pr_warn_elf("failed to get section #%zu header for %s",
sec_idx, filename);
return err;
sec_idx, obj->filename);
return -EINVAL;
}
sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
if (!sec_name) {
err = -errno;
pr_warn_elf("failed to get section #%zu name for %s",
sec_idx, filename);
return err;
sec_idx, obj->filename);
return -EINVAL;
}
data = elf_getdata(scn, 0);
if (!data) {
err = -errno;
pr_warn_elf("failed to get section #%zu (%s) data from %s",
sec_idx, sec_name, filename);
return err;
sec_idx, sec_name, obj->filename);
return -EINVAL;
}
sec = add_src_sec(obj, sec_name);
@ -672,7 +785,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
err = libbpf_get_error(obj->btf);
if (err) {
pr_warn("failed to parse .BTF from %s: %s\n",
filename, errstr(err));
obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@ -683,7 +796,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
err = libbpf_get_error(obj->btf_ext);
if (err) {
pr_warn("failed to parse .BTF.ext from '%s': %s\n",
filename, errstr(err));
obj->filename, errstr(err));
return err;
}
sec->skipped = true;
@ -700,7 +813,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
break;
default:
pr_warn("unrecognized section #%zu (%s) in %s\n",
sec_idx, sec_name, filename);
sec_idx, sec_name, obj->filename);
err = -EINVAL;
return err;
}
@ -2680,22 +2793,23 @@ int bpf_linker__finalize(struct bpf_linker *linker)
/* Finalize ELF layout */
if (elf_update(linker->elf, ELF_C_NULL) < 0) {
err = -errno;
err = -EINVAL;
pr_warn_elf("failed to finalize ELF layout");
return libbpf_err(err);
}
/* Write out final ELF contents */
if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
err = -errno;
err = -EINVAL;
pr_warn_elf("failed to write ELF contents");
return libbpf_err(err);
}
elf_end(linker->elf);
close(linker->fd);
linker->elf = NULL;
if (linker->fd_is_owned)
close(linker->fd);
linker->fd = -1;
return 0;


@ -661,7 +661,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *
* [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
*/
usdt_abs_ip = note.loc_addr;
if (base_addr)
if (base_addr && note.base_addr)
usdt_abs_ip += base_addr - note.base_addr;
/* When attaching uprobes (which is what USDTs basically are)


@ -18,7 +18,6 @@ feature
urandom_read
test_sockmap
test_lirc_mode2_user
test_flow_dissector
flow_dissector_load
test_tcpnotify_user
test_libbpf


@ -41,7 +41,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
CFLAGS += -g $(OPT_FLAGS) -rdynamic \
CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \
-Wall -Werror -fno-omit-frame-pointer \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
@ -54,21 +54,6 @@ PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null)
LDLIBS += $(PCAP_LIBS)
CFLAGS += $(PCAP_CFLAGS)
# The following tests perform type punning and they may break strict
# aliasing rules, which are exploited by both GCC and clang by default
# while optimizing. This can lead to broken programs.
progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing
progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing
progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing
progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing
progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing
progs/syscall.c-CFLAGS := -fno-strict-aliasing
progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing
progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing
progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing
progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing
# Some utility functions use LLVM libraries
jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS)
@ -103,18 +88,6 @@ progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error
progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error
progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error
# The following tests do type-punning, via the __imm_insn macro, from
# `struct bpf_insn' to long and then uses the value. This triggers an
# "is used uninitialized" warning in GCC due to strict-aliasing
# rules.
progs/verifier_ref_tracking.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_unpriv.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_cgroup_storage.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_ld_ind.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_map_ret_val.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_spill_fill.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_subprog_precision.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
progs/verifier_uninit.c-bpf_gcc-CFLAGS := -fno-strict-aliasing
endif
ifneq ($(CLANG_CPUV4),)
@ -132,7 +105,6 @@ TEST_PROGS := test_kmod.sh \
test_tunnel.sh \
test_lwt_seg6local.sh \
test_lirc_mode2.sh \
test_flow_dissector.sh \
test_xdp_vlan_mode_generic.sh \
test_xdp_vlan_mode_native.sh \
test_lwt_ip_encap.sh \
@ -150,17 +122,16 @@ TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh ima_setup.sh verify_sig_setup.sh \
test_xdp_vlan.sh test_bpftool.py
TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \
bpf_test_modorder_y.ko
TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS))
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = \
bench \
bpf_testmod.ko \
bpf_test_modorder_x.ko \
bpf_test_modorder_y.ko \
bpf_test_no_cfi.ko \
flow_dissector_load \
runqslower \
test_cpp \
test_flow_dissector \
test_lirc_mode2_user \
veristat \
xdp_features \
@ -183,8 +154,9 @@ override define CLEAN
$(Q)$(RM) -r $(TEST_GEN_PROGS)
$(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED)
$(Q)$(RM) -r $(TEST_GEN_FILES)
$(Q)$(RM) -r $(TEST_KMODS)
$(Q)$(RM) -r $(EXTRA_CLEAN)
$(Q)$(MAKE) -C bpf_testmod clean
$(Q)$(MAKE) -C test_kmods clean
$(Q)$(MAKE) docs-clean
endef
@ -202,9 +174,9 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1)
$(let OUTPUT,$(OUTPUT)/,\
$(eval include ../../../build/Makefile.feature))
else
OUTPUT := $(OUTPUT)/
override OUTPUT := $(OUTPUT)/
$(eval include ../../../build/Makefile.feature)
OUTPUT := $(patsubst %/,%,$(OUTPUT))
override OUTPUT := $(patsubst %/,%,$(OUTPUT))
endif
endif
@ -250,7 +222,7 @@ endif
# to build individual tests.
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
$(notdir $(TEST_GEN_PROGS) \
$(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \
$(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ;
# sort removes libbpf duplicates when not cross-building
@ -304,37 +276,19 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
$< -o $@ \
$(shell $(PKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
$(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
$(Q)$(MAKE) $(submake_extras) -C bpf_testmod \
RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
# This should really be a grouped target, but make versions before 4.3 don't
# support that for regular rules. However, pattern matching rules are implicitly
# treated as grouped even with older versions of make, so as a workaround, the
# subst() turns the rule into a pattern-matching rule.
$(addprefix test_kmods/,$(subst .ko,%ko,$(TEST_KMODS))): $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard test_kmods/Makefile test_kmods/*.[ch])
$(Q)$(RM) test_kmods/*.ko test_kmods/*.mod.o # force re-compilation
$(Q)$(MAKE) $(submake_extras) -C test_kmods \
RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
$(Q)cp bpf_testmod/bpf_testmod.ko $@
$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
$(TEST_KMOD_TARGETS): $(addprefix test_kmods/,$(TEST_KMODS))
$(call msg,MOD,,$@)
$(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation
$(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \
RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
$(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch])
$(call msg,MOD,,$@)
$(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation
$(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \
RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
$(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@
$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch])
$(call msg,MOD,,$@)
$(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation
$(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \
RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
$(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@
$(Q)cp test_kmods/$(@F) $@
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
@ -479,10 +433,10 @@ $(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG)
endef
# Determine target endianness.
IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
IS_LITTLE_ENDIAN := $(shell $(CC) -dM -E - </dev/null | \
grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
MENDIAN:=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
BPF_TARGET_ENDIAN:=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
@ -492,6 +446,8 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
-I$(abspath $(OUTPUT)/../usr/include) \
-std=gnu11 \
-fno-strict-aliasing \
-Wno-compare-distinct-pointer-types
# TODO: enable me -Wsign-compare
@ -759,14 +715,12 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
json_writer.c \
flow_dissector_load.h \
ip_check_defrag_frags.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(OUTPUT)/bpf_test_no_cfi.ko \
$(OUTPUT)/bpf_test_modorder_x.ko \
$(OUTPUT)/bpf_test_modorder_y.ko \
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
$(OUTPUT)/uprobe_multi \
$(TEST_KMOD_TARGETS) \
ima_setup.sh \
verify_sig_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c) \
@ -833,9 +787,12 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
# Make sure we are able to include and link libbpf against c++.
CXXFLAGS += $(CFLAGS)
CXXFLAGS := $(subst -D_GNU_SOURCE=,,$(CXXFLAGS))
CXXFLAGS := $(subst -std=gnu11,-std=gnu++11,$(CXXFLAGS))
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
$(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
$(Q)$(CXX) $(CXXFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
# Benchmark runner
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
@ -893,12 +850,9 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld
EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
feature bpftool $(TEST_KMOD_TARGETS) \
$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
bpf_test_no_cfi.ko \
bpf_test_modorder_x.ko \
bpf_test_modorder_y.ko \
no_alu32 cpuv4 bpf_gcc \
liburandom_read.so) \
$(OUTPUT)/FEATURE-DUMP.selftests

View File

@ -1,19 +0,0 @@
BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
ifeq ($(V),1)
Q =
else
Q = @
endif
MODULES = bpf_test_modorder_x.ko
obj-m += bpf_test_modorder_x.o
all:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
clean:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean

View File

@ -1,19 +0,0 @@
BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
ifeq ($(V),1)
Q =
else
Q = @
endif
MODULES = bpf_test_modorder_y.ko
obj-m += bpf_test_modorder_y.o
all:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
clean:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean

View File

@ -1,19 +0,0 @@
BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..)
ifeq ($(V),1)
Q =
else
Q = @
endif
MODULES = bpf_test_no_cfi.ko
obj-m += bpf_test_no_cfi.o
all:
+$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules
clean:
+$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean

View File

@ -1,20 +0,0 @@
BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..)
ifeq ($(V),1)
Q =
else
Q = @
endif
MODULES = bpf_testmod.ko
obj-m += bpf_testmod.o
CFLAGS_bpf_testmod.o = -I$(src)
all:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules
clean:
+$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean

View File

@ -58,6 +58,7 @@ CONFIG_MPLS=y
CONFIG_MPLS_IPTUNNEL=y
CONFIG_MPLS_ROUTING=y
CONFIG_MPTCP=y
CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_SKBMOD=y
CONFIG_NET_CLS=y
CONFIG_NET_CLS_ACT=y

View File

@ -21,7 +21,7 @@
#include <linux/limits.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <net/if.h>

View File

@ -14,6 +14,7 @@ typedef __u16 __sum16;
#include <linux/sockios.h>
#include <linux/err.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <bpf/bpf_endian.h>
#include <net/if.h>
@ -105,6 +106,45 @@ static __u16 csum_fold(__u32 csum)
return (__u16)~csum;
}
static __wsum csum_partial(const void *buf, int len, __wsum sum)
{
__u16 *p = (__u16 *)buf;
int num_u16 = len >> 1;
int i;
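/* Note: a trailing odd byte is ignored (len >> 1 truncates), so callers
* must pass an even length or zero-pad the buffer past the summed data.
*/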
for (i = 0; i < num_u16; i++)
sum += p[i];
return sum;
}
static inline __sum16 build_ip_csum(struct iphdr *iph)
{
__u32 sum = 0;
__u16 *p;
iph->check = 0;
p = (void *)iph;
sum = csum_partial(p, iph->ihl << 2, 0);
return csum_fold(sum);
}
/**
* csum_tcpudp_magic - compute IP pseudo-header checksum
*
* Compute the IPv4 pseudo-header checksum. The helper can take an
* accumulated sum from the transport layer, fold it in, and directly
* return the transport-layer checksum.
*
* @saddr: IP source address
* @daddr: IP dest address
* @len: IP data size
* @proto: transport layer protocol
* @csum: The accumulated partial sum to add to the computation
*
* Returns the folded sum
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__wsum csum)
@ -120,6 +160,21 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
return csum_fold((__u32)s);
}
/**
* csum_ipv6_magic - compute IPv6 pseudo-header checksum
*
* Compute the IPv6 pseudo-header checksum. The helper can take an
* accumulated sum from the transport layer, fold it in, and directly
* return the transport-layer checksum.
*
* @saddr: IPv6 source address
* @daddr: IPv6 dest address
* @len: IPv6 data size
* @proto: transport layer protocol
* @csum: The accumulated partial sum to add to the computation
*
* Returns the folded sum
*/
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, __u8 proto,
@ -139,6 +194,47 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__u32)s);
}
/**
* build_udp_v4_csum - compute UDP checksum for UDP over IPv4
*
* Compute the checksum to embed in UDP header, composed of the sum of IP
* pseudo-header checksum, UDP header checksum and UDP data checksum
* @iph: IP header
* @udph: UDP header, which must be immediately followed by UDP data
*
* Returns the total checksum
*/
static inline __sum16 build_udp_v4_csum(const struct iphdr *iph,
const struct udphdr *udph)
{
unsigned long sum;
sum = csum_partial(udph, ntohs(udph->len), 0);
return csum_tcpudp_magic(iph->saddr, iph->daddr, ntohs(udph->len),
IPPROTO_UDP, sum);
}
/**
* build_udp_v6_csum - compute UDP checksum for UDP over IPv6
*
* Compute the checksum to embed in UDP header, composed of the sum of IPv6
* pseudo-header checksum, UDP header checksum and UDP data checksum
* @ip6h: IPv6 header
* @udph: UDP header, which must be immediately followed by UDP data
*
* Returns the total checksum
*/
static inline __sum16 build_udp_v6_csum(const struct ipv6hdr *ip6h,
const struct udphdr *udph)
{
unsigned long sum;
sum = csum_partial(udph, ntohs(udph->len), 0);
return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ntohs(udph->len),
IPPROTO_UDP, sum);
}
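A minimal sketch of how the helpers above compose when finalizing a raw
IPv4/UDP packet; the finalize_udp4_packet() wrapper and the assumption that
pkt holds an iphdr immediately followed by a udphdr and payload (with
udph->len already set, and the payload zero-padded to an even length for
csum_partial()) are illustrative, not part of the patch:
static void finalize_udp4_packet(void *pkt)
{
struct iphdr *iph = pkt;
struct udphdr *udph = (void *)(iph + 1);
udph->check = 0; /* summed field must start at zero */
udph->check = build_udp_v4_csum(iph, udph);
iph->check = build_ip_csum(iph); /* helper zeroes iph->check itself */
}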
struct tmonitor_ctx;
#ifdef TRAFFIC_MONITOR

View File

@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_skb_direct_packet_access.skel.h"
void test_cgroup_skb_prog_run_direct_packet_access(void)
{
int err;
struct cgroup_skb_direct_packet_access *skel;
char test_skb[64] = {};
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = test_skb,
.data_size_in = sizeof(test_skb),
);
skel = cgroup_skb_direct_packet_access__open_and_load();
if (!ASSERT_OK_PTR(skel, "cgroup_skb_direct_packet_access__open_and_load"))
return;
err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.direct_packet_access), &topts);
ASSERT_OK(err, "bpf_prog_test_run_opts err");
ASSERT_EQ(topts.retval, 1, "retval");
ASSERT_NEQ(skel->bss->data_end, 0, "data_end");
cgroup_skb_direct_packet_access__destroy(skel);
}

View File

@ -2,7 +2,7 @@
#define _GNU_SOURCE
#include <test_progs.h>
#include "progs/core_reloc_types.h"
#include "bpf_testmod/bpf_testmod.h"
#include "test_kmods/bpf_testmod.h"
#include <linux/limits.h>
#include <sys/mman.h>
#include <sys/syscall.h>

View File

@ -0,0 +1,441 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/btf.h>
#include <bpf/bpf.h>
#include "../test_btf.h"
static inline int new_map(void)
{
const char *name = NULL;
__u32 max_entries = 1;
__u32 value_size = 8;
__u32 key_size = 4;
return bpf_map_create(BPF_MAP_TYPE_ARRAY, name,
key_size, value_size,
max_entries, NULL);
}
static int new_btf(void)
{
struct btf_blob {
struct btf_header btf_hdr;
__u32 types[8];
__u32 str;
} raw_btf = {
.btf_hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = sizeof(raw_btf.types),
.str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types),
.str_len = sizeof(raw_btf.str),
},
.types = {
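/* each BTF_TYPE_INT_ENC entry expands to four __u32 words: a
* struct btf_type header (3 words) plus one int-encoding word,
* so the two entries below exactly fill types[8]
*/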
/* long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
/* unsigned long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
},
};
return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL);
}
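/* Close an fd if it is valid and mark it closed; the do-while(0) keeps the
* macro usable as a single statement, e.g. in an unbraced if branch.
*/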
#define Close(FD) do { \
if ((FD) >= 0) { \
close(FD); \
FD = -1; \
} \
} while(0)
static bool map_exists(__u32 id)
{
int fd;
fd = bpf_map_get_fd_by_id(id);
if (fd >= 0) {
close(fd);
return true;
}
return false;
}
static bool btf_exists(__u32 id)
{
int fd;
fd = bpf_btf_get_fd_by_id(id);
if (fd >= 0) {
close(fd);
return true;
}
return false;
}
static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids)
{
__u32 len = sizeof(struct bpf_prog_info);
struct bpf_prog_info info;
int err;
memset(&info, 0, len);
info.nr_map_ids = *nr_map_ids;
info.map_ids = ptr_to_u64(map_ids);
err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
return -1;
*nr_map_ids = info.nr_map_ids;
return 0;
}
static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt)
{
/* A trivial program which uses one map */
struct bpf_insn insns[] = {
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
LIBBPF_OPTS(bpf_prog_load_opts, opts);
opts.fd_array = fd_array;
opts.fd_array_cnt = fd_array_cnt;
return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts);
}
static int load_test_prog(const int *fd_array, int fd_array_cnt)
{
int map_fd;
int ret;
map_fd = new_map();
if (!ASSERT_GE(map_fd, 0, "new_map"))
return map_fd;
ret = __load_test_prog(map_fd, fd_array, fd_array_cnt);
close(map_fd);
return ret;
}
static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids)
{
int err;
err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids);
if (!ASSERT_OK(err, "bpf_prog_get_map_ids"))
return false;
if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids"))
return false;
return true;
}
/*
* Load a program, which uses one map. No fd_array maps are present.
* On return only one map is expected to be bound to prog.
*/
static void check_fd_array_cnt__no_fd_array(void)
{
__u32 map_ids[16];
__u32 nr_map_ids;
int prog_fd = -1;
prog_fd = load_test_prog(NULL, 0);
if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
return;
nr_map_ids = ARRAY_SIZE(map_ids);
check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids);
close(prog_fd);
}
/*
* Load a program, which uses one map, and pass two extra, non-equal, maps in
* fd_array with fd_array_cnt=2. On return three maps are expected to be bound
* to the program.
*/
static void check_fd_array_cnt__fd_array_ok(void)
{
int extra_fds[2] = { -1, -1 };
__u32 map_ids[16];
__u32 nr_map_ids;
int prog_fd = -1;
extra_fds[0] = new_map();
if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
goto cleanup;
extra_fds[1] = new_map();
if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
goto cleanup;
prog_fd = load_test_prog(extra_fds, 2);
if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
goto cleanup;
nr_map_ids = ARRAY_SIZE(map_ids);
if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
goto cleanup;
/* maps should still exist when original file descriptors are closed */
Close(extra_fds[0]);
Close(extra_fds[1]);
if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist"))
goto cleanup;
if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist"))
goto cleanup;
/* some fds might be invalid, so ignore return codes */
cleanup:
Close(extra_fds[1]);
Close(extra_fds[0]);
Close(prog_fd);
}
/*
* Load a program with a few extra maps duplicated in the fd_array.
* After the load maps should only be referenced once.
*/
static void check_fd_array_cnt__duplicated_maps(void)
{
int extra_fds[4] = { -1, -1, -1, -1 };
__u32 map_ids[16];
__u32 nr_map_ids;
int prog_fd = -1;
extra_fds[0] = extra_fds[2] = new_map();
if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
goto cleanup;
extra_fds[1] = extra_fds[3] = new_map();
if (!ASSERT_GE(extra_fds[1], 0, "new_map"))
goto cleanup;
prog_fd = load_test_prog(extra_fds, 4);
if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
goto cleanup;
nr_map_ids = ARRAY_SIZE(map_ids);
if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids))
goto cleanup;
/* maps should still exist when original file descriptors are closed */
Close(extra_fds[0]);
Close(extra_fds[1]);
if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
goto cleanup;
if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist"))
goto cleanup;
/* some fds might be invalid, so ignore return codes */
cleanup:
Close(extra_fds[1]);
Close(extra_fds[0]);
Close(prog_fd);
}
/*
* Check that if maps which are referenced by a program are
* passed in fd_array, then they will be referenced only once
*/
static void check_fd_array_cnt__referenced_maps_in_fd_array(void)
{
int extra_fds[1] = { -1 };
__u32 map_ids[16];
__u32 nr_map_ids;
int prog_fd = -1;
extra_fds[0] = new_map();
if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
goto cleanup;
prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1);
if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
goto cleanup;
nr_map_ids = ARRAY_SIZE(map_ids);
if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids))
goto cleanup;
/* map should still exist when original file descriptor is closed */
Close(extra_fds[0]);
if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist"))
goto cleanup;
/* some fds might be invalid, so ignore return codes */
cleanup:
Close(extra_fds[0]);
Close(prog_fd);
}
static int get_btf_id_by_fd(int btf_fd, __u32 *id)
{
struct bpf_btf_info info;
__u32 info_len = sizeof(info);
int err;
memset(&info, 0, info_len);
err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len);
if (err)
return err;
if (id)
*id = info.id;
return 0;
}
/*
* Check that fd_array operates properly for btfs. Namely, to check that
* passing a btf fd in fd_array increases its reference count, do the
* following:
* 1) Create a new btf; it's referenced only by a file descriptor, so refcnt=1
* 2) Load a BPF prog with fd_array[0] = btf_fd; now the btf's refcnt=2
* 3) Close the btf_fd; now refcnt=1
* Wait and check that the BTF still exists.
*/
static void check_fd_array_cnt__referenced_btfs(void)
{
int extra_fds[1] = { -1 };
int prog_fd = -1;
__u32 btf_id;
int tries;
int err;
extra_fds[0] = new_btf();
if (!ASSERT_GE(extra_fds[0], 0, "new_btf"))
goto cleanup;
prog_fd = load_test_prog(extra_fds, 1);
if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD"))
goto cleanup;
/* btf should still exist when original file descriptor is closed */
err = get_btf_id_by_fd(extra_fds[0], &btf_id);
if (!ASSERT_GE(err, 0, "get_btf_id_by_fd"))
goto cleanup;
Close(extra_fds[0]);
if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1"))
goto cleanup;
if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist"))
goto cleanup;
Close(prog_fd);
/* The program is freed by a workqueue; there is no reliable
* way to sync with that, so just wait a bit (max ~1 second). */
for (tries = 100; tries >= 0; tries--) {
usleep(1000);
if (!btf_exists(btf_id))
break;
if (tries)
continue;
PRINT_FAIL("btf should have been freed");
}
/* some fds might be invalid, so ignore return codes */
cleanup:
Close(extra_fds[0]);
Close(prog_fd);
}
/*
* Test that a program with trash in fd_array can't be loaded:
* only map and BTF file descriptors should be accepted.
*/
static void check_fd_array_cnt__fd_array_with_trash(void)
{
int extra_fds[3] = { -1, -1, -1 };
int prog_fd = -1;
extra_fds[0] = new_map();
if (!ASSERT_GE(extra_fds[0], 0, "new_map"))
goto cleanup;
extra_fds[1] = new_btf();
if (!ASSERT_GE(extra_fds[1], 0, "new_btf"))
goto cleanup;
/* trash 1: not a file descriptor */
extra_fds[2] = 0xbeef;
prog_fd = load_test_prog(extra_fds, 3);
if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF"))
goto cleanup;
/* trash 2: not a map or btf */
extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0);
if (!ASSERT_GE(extra_fds[2], 0, "socket"))
goto cleanup;
prog_fd = load_test_prog(extra_fds, 3);
if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL"))
goto cleanup;
/* Validate that the prog is ok if trash is removed */
Close(extra_fds[2]);
extra_fds[2] = new_btf();
if (!ASSERT_GE(extra_fds[2], 0, "new_btf"))
goto cleanup;
prog_fd = load_test_prog(extra_fds, 3);
if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded"))
goto cleanup;
/* some fds might be invalid, so ignore return codes */
cleanup:
Close(extra_fds[2]);
Close(extra_fds[1]);
Close(extra_fds[0]);
}
/*
* Test that a program with too big fd_array can't be loaded.
*/
static void check_fd_array_cnt__fd_array_too_big(void)
{
int extra_fds[65];
int prog_fd = -1;
int i;
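/* The kernel caps fd_array_cnt (at MAX_USED_MAPS, 64 at the time of
* writing), so 65 entries must be rejected with -E2BIG.
*/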
for (i = 0; i < 65; i++) {
extra_fds[i] = new_map();
if (!ASSERT_GE(extra_fds[i], 0, "new_map"))
goto cleanup_fds;
}
prog_fd = load_test_prog(extra_fds, 65);
ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG");
cleanup_fds:
while (i > 0)
Close(extra_fds[--i]);
}
void test_fd_array_cnt(void)
{
if (test__start_subtest("no-fd-array"))
check_fd_array_cnt__no_fd_array();
if (test__start_subtest("fd-array-ok"))
check_fd_array_cnt__fd_array_ok();
if (test__start_subtest("fd-array-dup-input"))
check_fd_array_cnt__duplicated_maps();
if (test__start_subtest("fd-array-ref-maps-in-array"))
check_fd_array_cnt__referenced_maps_in_fd_array();
if (test__start_subtest("fd-array-ref-btfs"))
check_fd_array_cnt__referenced_btfs();
if (test__start_subtest("fd-array-trash-input"))
check_fd_array_cnt__fd_array_with_trash();
if (test__start_subtest("fd-array-2big"))
check_fd_array_cnt__fd_array_too_big();
}

View File

@ -171,6 +171,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
/* See also arch_adjust_kprobe_addr(). */
if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
entry_offset = 4;
if (skel->kconfig->CONFIG_PPC64 &&
skel->kconfig->CONFIG_KPROBES_ON_FTRACE &&
!skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE)
entry_offset = 4;
err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
ASSERT_OK(err, "verify_perf_link_info");
} else {

View File

@ -7,39 +7,14 @@
#include "bpf_flow.skel.h"
#define TEST_NS "flow_dissector_ns"
#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
#define TEST_NAME_MAX_LEN 64
#ifndef IP_MF
#define IP_MF 0x2000
#endif
#define CHECK_FLOW_KEYS(desc, got, expected) \
_CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \
desc, \
topts.duration, \
"nhoff=%u/%u " \
"thoff=%u/%u " \
"addr_proto=0x%x/0x%x " \
"is_frag=%u/%u " \
"is_first_frag=%u/%u " \
"is_encap=%u/%u " \
"ip_proto=0x%x/0x%x " \
"n_proto=0x%x/0x%x " \
"flow_label=0x%x/0x%x " \
"sport=%u/%u " \
"dport=%u/%u\n", \
got.nhoff, expected.nhoff, \
got.thoff, expected.thoff, \
got.addr_proto, expected.addr_proto, \
got.is_frag, expected.is_frag, \
got.is_first_frag, expected.is_first_frag, \
got.is_encap, expected.is_encap, \
got.ip_proto, expected.ip_proto, \
got.n_proto, expected.n_proto, \
got.flow_label, expected.flow_label, \
got.sport, expected.sport, \
got.dport, expected.dport)
struct ipv4_pkt {
struct ethhdr eth;
struct iphdr iph;
@ -89,6 +64,19 @@ struct dvlan_ipv6_pkt {
struct tcphdr tcp;
} __packed;
struct gre_base_hdr {
__be16 flags;
__be16 protocol;
};
struct gre_minimal_pkt {
struct ethhdr eth;
struct iphdr iph;
struct gre_base_hdr gre_hdr;
struct iphdr iph_inner;
struct tcphdr tcp;
} __packed;
struct test {
const char *name;
union {
@ -98,6 +86,7 @@ struct test {
struct ipv6_pkt ipv6;
struct ipv6_frag_pkt ipv6_frag;
struct dvlan_ipv6_pkt dvlan_ipv6;
struct gre_minimal_pkt gre_minimal;
} pkt;
struct bpf_flow_keys keys;
__u32 flags;
@ -106,7 +95,6 @@ struct test {
#define VLAN_HLEN 4
static __u32 duration;
struct test tests[] = {
{
.name = "ipv4",
@ -444,8 +432,137 @@ struct test tests[] = {
},
.retval = BPF_FLOW_DISSECTOR_CONTINUE,
},
{
.name = "ip-gre",
.pkt.gre_minimal = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_GRE,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.gre_hdr = {
.flags = 0,
.protocol = __bpf_constant_htons(ETH_P_IP),
},
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
__bpf_constant_htons(MAGIC_BYTES -
sizeof(struct iphdr)),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr) * 2 +
sizeof(struct gre_base_hdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_TCP,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_encap = true,
.sport = 80,
.dport = 8080,
},
.retval = BPF_OK,
},
{
.name = "ip-gre-no-encap",
.pkt.ipip = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = IPPROTO_GRE,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.iph_inner.ihl = 5,
.iph_inner.protocol = IPPROTO_TCP,
.iph_inner.tot_len =
__bpf_constant_htons(MAGIC_BYTES -
sizeof(struct iphdr)),
.tcp.doff = 5,
.tcp.source = 80,
.tcp.dest = 8080,
},
.keys = {
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
.nhoff = ETH_HLEN,
.thoff = ETH_HLEN + sizeof(struct iphdr)
+ sizeof(struct gre_base_hdr),
.addr_proto = ETH_P_IP,
.ip_proto = IPPROTO_GRE,
.n_proto = __bpf_constant_htons(ETH_P_IP),
.is_encap = true,
},
.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
.retval = BPF_OK,
},
};
void serial_test_flow_dissector_namespace(void)
{
struct bpf_flow *skel;
struct nstoken *ns;
int err, prog_fd;
skel = bpf_flow__open_and_load();
if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
return;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (!ASSERT_OK_FD(prog_fd, "get dissector fd"))
goto out_destroy_skel;
/* We must be able to attach a flow dissector to root namespace */
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (!ASSERT_OK(err, "attach on root namespace ok"))
goto out_destroy_skel;
err = make_netns(TEST_NS);
if (!ASSERT_OK(err, "create non-root net namespace"))
goto out_destroy_skel;
/* We must not be able to additionally attach a flow dissector to a
* non-root net namespace
*/
ns = open_netns(TEST_NS);
if (!ASSERT_OK_PTR(ns, "enter non-root net namespace"))
goto out_clean_ns;
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (!ASSERT_ERR(err,
"refuse new flow dissector in non-root net namespace"))
bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
else
ASSERT_EQ(errno, EEXIST,
"refused because of already attached prog");
close_netns(ns);
/* If no flow dissector is attached to the root namespace, we must
* be able to attach one to a non-root net namespace
*/
bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
ns = open_netns(TEST_NS);
ASSERT_OK_PTR(ns, "enter non-root net namespace");
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
close_netns(ns);
ASSERT_OK(err, "accept new flow dissector in non-root net namespace");
/* If a flow dissector is attached to non-root net namespace, attaching
* a flow dissector to root namespace must fail
*/
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace"))
bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
else
ASSERT_EQ(errno, EEXIST,
"refused because of already attached prog");
ns = open_netns(TEST_NS);
bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
close_netns(ns);
out_clean_ns:
remove_netns(TEST_NS);
out_destroy_skel:
bpf_flow__destroy(skel);
}
static int create_tap(const char *ifname)
{
struct ifreq ifr = {
@ -533,22 +650,27 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
return 0;
}
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys,
char *test_suffix)
{
char test_name[TEST_NAME_MAX_LEN];
int i, err, keys_fd;
keys_fd = bpf_map__fd(keys);
if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
return;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
/* Keep in sync with 'flags' from eth_get_headlen. */
__u32 eth_get_headlen_flags =
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_flow_keys flow_keys = {};
__u32 key = (__u32)(tests[i].keys.sport) << 16 |
tests[i].keys.dport;
snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name,
test_suffix);
if (!test__start_subtest(test_name))
continue;
/* For the skb-less case we can't pass input flags; run
* only the tests that have a matching set of flags.
@ -558,78 +680,139 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
continue;
err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap"))
continue;
/* check the stored flow_keys only if BPF_OK expected */
if (tests[i].retval != BPF_OK)
continue;
err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
ASSERT_OK(err, "bpf_map_lookup_elem");
if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
continue;
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
sizeof(struct bpf_flow_keys),
"returned flow keys");
err = bpf_map_delete_elem(keys_fd, &key);
ASSERT_OK(err, "bpf_map_delete_elem");
}
}
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
void test_flow_dissector_skb_less_direct_attach(void)
{
int err, prog_fd;
int err, prog_fd, tap_fd;
struct bpf_flow *skel;
struct netns_obj *ns;
ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
skel = bpf_flow__open_and_load();
if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
goto out_clean_ns;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
if (!ASSERT_OK(err, "init_prog_array"))
goto out_destroy_skel;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
return;
if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
goto out_destroy_skel;
err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
return;
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out_destroy_skel;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
tap_fd = create_tap("tap0");
if (!ASSERT_OK_FD(tap_fd, "create_tap"))
goto out_destroy_skel;
err = ifup("tap0");
if (!ASSERT_OK(err, "ifup"))
goto out_close_tap;
run_tests_skb_less(tap_fd, skel->maps.last_dissection,
"non-skb-direct-attach");
err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
ASSERT_OK(err, "bpf_prog_detach2");
out_close_tap:
close(tap_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
out_clean_ns:
netns_free(ns);
}
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
void test_flow_dissector_skb_less_indirect_attach(void)
{
int err, net_fd, tap_fd;
struct bpf_flow *skel;
struct bpf_link *link;
int err, net_fd;
struct netns_obj *ns;
ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
if (!ASSERT_OK_PTR(ns, "create and open netns"))
return;
skel = bpf_flow__open_and_load();
if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
goto out_clean_ns;
net_fd = open("/proc/self/ns/net", O_RDONLY);
if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
return;
if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net"))
goto out_destroy_skel;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
if (!ASSERT_OK(err, "init_prog_array"))
goto out_destroy_skel;
tap_fd = create_tap("tap0");
if (!ASSERT_OK_FD(tap_fd, "create_tap"))
goto out_close_ns;
err = ifup("tap0");
if (!ASSERT_OK(err, "ifup"))
goto out_close_tap;
link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
if (!ASSERT_OK_PTR(link, "attach_netns"))
goto out_close;
goto out_close_tap;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
run_tests_skb_less(tap_fd, skel->maps.last_dissection,
"non-skb-indirect-attach");
err = bpf_link__destroy(link);
CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
ASSERT_OK(err, "bpf_link__destroy");
out_close_tap:
close(tap_fd);
out_close_ns:
close(net_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
out_clean_ns:
netns_free(ns);
}
void test_flow_dissector(void)
void test_flow_dissector_skb(void)
{
int i, err, prog_fd, keys_fd = -1, tap_fd;
char test_name[TEST_NAME_MAX_LEN];
struct bpf_flow *skel;
int i, err, prog_fd;
skel = bpf_flow__open_and_load();
if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
return;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
goto out_destroy_skel;
keys_fd = bpf_map__fd(skel->maps.last_dissection);
if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
goto out_destroy_skel;
err = init_prog_array(skel->obj, skel->maps.jmp_table);
if (CHECK(err, "init_prog_array", "err %d\n", err))
if (!ASSERT_OK(err, "init_prog_array"))
goto out_destroy_skel;
prog_fd = bpf_program__fd(skel->progs._dissect);
if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
goto out_destroy_skel;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@ -641,6 +824,10 @@ void test_flow_dissector(void)
);
static struct bpf_flow_keys ctx = {};
snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name);
if (!test__start_subtest(test_name))
continue;
if (tests[i].flags) {
topts.ctx_in = &ctx;
topts.ctx_size_in = sizeof(ctx);
@ -656,26 +843,12 @@ void test_flow_dissector(void)
continue;
ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
"test_run data_size_out");
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
sizeof(struct bpf_flow_keys),
"returned flow keys");
}
/* Do the same tests but for skb-less flow dissector.
* We use a known path in the net/tun driver that calls
* eth_get_headlen and we manually export bpf_flow_keys
* via BPF map in this case.
*/
tap_fd = create_tap("tap0");
CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
err = ifup("tap0");
CHECK(err, "ifup", "err %d errno %d\n", err, errno);
/* Test direct prog attachment */
test_skb_less_prog_attach(skel, tap_fd);
/* Test indirect prog attachment via link */
test_skb_less_link_create(skel, tap_fd);
close(tap_fd);
out_destroy_skel:
bpf_flow__destroy(skel);
}

View File

@ -0,0 +1,792 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <arpa/inet.h>
#include <asm/byteorder.h>
#include <netinet/udp.h>
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "bpf_util.h"
#include "bpf_flow.skel.h"
#define CFG_PORT_INNER 8000
#define CFG_PORT_GUE 6080
#define SUBTEST_NAME_MAX_LEN 32
#define TEST_NAME_MAX_LEN (32 + SUBTEST_NAME_MAX_LEN)
#define MAX_SOURCE_PORTS 3
#define TEST_PACKETS_COUNT 10
#define TEST_PACKET_LEN 100
#define TEST_PACKET_PATTERN 'a'
#define TEST_IPV4 "192.168.0.1/32"
#define TEST_IPV6 "100::a/128"
#define TEST_TUNNEL_REMOTE "127.0.0.2"
#define TEST_TUNNEL_LOCAL "127.0.0.1"
#define INIT_ADDR4(addr4, port) \
{ \
.sin_family = AF_INET, \
.sin_port = __constant_htons(port), \
.sin_addr.s_addr = __constant_htonl(addr4), \
}
#define INIT_ADDR6(addr6, port) \
{ \
.sin6_family = AF_INET6, \
.sin6_port = __constant_htons(port), \
.sin6_addr = addr6, \
}
#define TEST_IN4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 2, 0)
#define TEST_IN4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, CFG_PORT_INNER)
#define TEST_OUT4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 1, 0)
#define TEST_OUT4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, 0)
#define TEST_IN6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
#define TEST_IN6_DST_ADDR_DEFAULT \
INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
#define TEST_OUT6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
#define TEST_OUT6_DST_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0)
#define TEST_IN4_SRC_ADDR_DISSECT_CONTINUE INIT_ADDR4(INADDR_LOOPBACK + 126, 0)
#define TEST_IN4_SRC_ADDR_IPIP INIT_ADDR4((in_addr_t)0x01010101, 0)
#define TEST_IN4_DST_ADDR_IPIP INIT_ADDR4((in_addr_t)0xC0A80001, CFG_PORT_INNER)
struct grehdr {
uint16_t unused;
uint16_t protocol;
} __packed;
struct guehdr {
union {
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 hlen : 5, control : 1, version : 2;
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 version : 2, control : 1, hlen : 5;
#else
#error "Please fix <asm/byteorder.h>"
#endif
__u8 proto_ctype;
__be16 flags;
};
__be32 word;
};
};
static char buf[ETH_DATA_LEN];
struct test_configuration {
char name[SUBTEST_NAME_MAX_LEN];
int (*test_setup)(void);
void (*test_teardown)(void);
int source_ports[MAX_SOURCE_PORTS];
int cfg_l3_inner;
struct sockaddr_in in_saddr4;
struct sockaddr_in in_daddr4;
struct sockaddr_in6 in_saddr6;
struct sockaddr_in6 in_daddr6;
int cfg_l3_outer;
struct sockaddr_in out_saddr4;
struct sockaddr_in out_daddr4;
struct sockaddr_in6 out_saddr6;
struct sockaddr_in6 out_daddr6;
int cfg_encap_proto;
uint8_t cfg_dsfield_inner;
uint8_t cfg_dsfield_outer;
int cfg_l3_extra;
struct sockaddr_in extra_saddr4;
struct sockaddr_in extra_daddr4;
struct sockaddr_in6 extra_saddr6;
struct sockaddr_in6 extra_daddr6;
};
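/* Current wall-clock time in milliseconds; used below to bound how long
* we poll for straggler packets.
*/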
static unsigned long util_gettime(void)
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}
static void build_ipv4_header(void *header, uint8_t proto, uint32_t src,
uint32_t dst, int payload_len, uint8_t tos)
{
struct iphdr *iph = header;
iph->ihl = 5;
iph->version = 4;
iph->tos = tos;
iph->ttl = 8;
iph->tot_len = htons(sizeof(*iph) + payload_len);
iph->id = htons(1337);
iph->protocol = proto;
iph->saddr = src;
iph->daddr = dst;
iph->check = build_ip_csum((void *)iph);
}
static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
{
uint16_t val, *ptr = (uint16_t *)ip6h;
val = ntohs(*ptr);
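/* First 16 bits of the IPv6 header: 4-bit version, 8-bit traffic
* class (the dsfield), then the top 4 bits of the flow label; keep
* the version and flow-label bits, replace the traffic class.
*/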
val &= 0xF00F;
val |= ((uint16_t)dsfield) << 4;
*ptr = htons(val);
}
static void build_ipv6_header(void *header, uint8_t proto,
const struct sockaddr_in6 *src,
const struct sockaddr_in6 *dst, int payload_len,
uint8_t dsfield)
{
struct ipv6hdr *ip6h = header;
ip6h->version = 6;
ip6h->payload_len = htons(payload_len);
ip6h->nexthdr = proto;
ip6h->hop_limit = 8;
ipv6_set_dsfield(ip6h, dsfield);
memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr));
memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr));
}
static void build_udp_header(void *header, int payload_len, uint16_t sport,
uint16_t dport, int family)
{
struct udphdr *udph = header;
int len = sizeof(*udph) + payload_len;
udph->source = htons(sport);
udph->dest = htons(dport);
udph->len = htons(len);
udph->check = 0;
if (family == AF_INET)
udph->check = build_udp_v4_csum(header - sizeof(struct iphdr),
udph);
else
udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
udph);
}
static void build_gue_header(void *header, uint8_t proto)
{
struct guehdr *gueh = header;
gueh->proto_ctype = proto;
}
static void build_gre_header(void *header, uint16_t proto)
{
struct grehdr *greh = header;
greh->protocol = htons(proto);
}
static int l3_length(int family)
{
if (family == AF_INET)
return sizeof(struct iphdr);
else
return sizeof(struct ipv6hdr);
}
static int build_packet(const struct test_configuration *test, uint16_t sport)
{
int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
int el3_len = 0, packet_len;
memset(buf, 0, ETH_DATA_LEN);
if (test->cfg_l3_extra)
el3_len = l3_length(test->cfg_l3_extra);
/* calculate header offsets */
if (test->cfg_encap_proto) {
ol3_len = l3_length(test->cfg_l3_outer);
if (test->cfg_encap_proto == IPPROTO_GRE)
ol4_len = sizeof(struct grehdr);
else if (test->cfg_encap_proto == IPPROTO_UDP)
ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
}
il3_len = l3_length(test->cfg_l3_inner);
il4_len = sizeof(struct udphdr);
packet_len = el3_len + ol3_len + ol4_len + il3_len + il4_len +
TEST_PACKET_LEN;
if (!ASSERT_LE(packet_len, sizeof(buf), "check packet size"))
return -1;
/*
* Fill the packet from the inside out, to calculate correct checksums.
* But create the IP header before the UDP header, as UDP uses the IP
* addresses for its pseudo-header sum.
*/
memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
TEST_PACKET_PATTERN, TEST_PACKET_LEN);
/* add zero byte for udp csum padding */
buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + TEST_PACKET_LEN] =
0;
switch (test->cfg_l3_inner) {
case PF_INET:
build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
IPPROTO_UDP, test->in_saddr4.sin_addr.s_addr,
test->in_daddr4.sin_addr.s_addr,
il4_len + TEST_PACKET_LEN,
test->cfg_dsfield_inner);
break;
case PF_INET6:
build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
IPPROTO_UDP, &test->in_saddr6,
&test->in_daddr6, il4_len + TEST_PACKET_LEN,
test->cfg_dsfield_inner);
break;
}
build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
TEST_PACKET_LEN, sport, CFG_PORT_INNER,
test->cfg_l3_inner);
if (!test->cfg_encap_proto)
return il3_len + il4_len + TEST_PACKET_LEN;
switch (test->cfg_l3_outer) {
case PF_INET:
build_ipv4_header(buf + el3_len, test->cfg_encap_proto,
test->out_saddr4.sin_addr.s_addr,
test->out_daddr4.sin_addr.s_addr,
ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
test->cfg_dsfield_outer);
break;
case PF_INET6:
build_ipv6_header(buf + el3_len, test->cfg_encap_proto,
&test->out_saddr6, &test->out_daddr6,
ol4_len + il3_len + il4_len + TEST_PACKET_LEN,
test->cfg_dsfield_outer);
break;
}
switch (test->cfg_encap_proto) {
case IPPROTO_UDP:
build_gue_header(buf + el3_len + ol3_len + ol4_len -
sizeof(struct guehdr),
test->cfg_l3_inner == PF_INET ? IPPROTO_IPIP :
IPPROTO_IPV6);
build_udp_header(buf + el3_len + ol3_len,
sizeof(struct guehdr) + il3_len + il4_len +
TEST_PACKET_LEN,
sport, CFG_PORT_GUE, test->cfg_l3_outer);
break;
case IPPROTO_GRE:
build_gre_header(buf + el3_len + ol3_len,
test->cfg_l3_inner == PF_INET ? ETH_P_IP :
ETH_P_IPV6);
break;
}
switch (test->cfg_l3_extra) {
case PF_INET:
build_ipv4_header(buf,
test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
IPPROTO_IPV6,
test->extra_saddr4.sin_addr.s_addr,
test->extra_daddr4.sin_addr.s_addr,
ol3_len + ol4_len + il3_len + il4_len +
TEST_PACKET_LEN,
0);
break;
case PF_INET6:
build_ipv6_header(buf,
test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP :
IPPROTO_IPV6,
&test->extra_saddr6, &test->extra_daddr6,
ol3_len + ol4_len + il3_len + il4_len +
TEST_PACKET_LEN,
0);
break;
}
return el3_len + ol3_len + ol4_len + il3_len + il4_len +
TEST_PACKET_LEN;
}
/* sender transmits encapsulated over RAW or unencap'd over UDP */
static int setup_tx(const struct test_configuration *test)
{
int family, fd, ret;
if (test->cfg_l3_extra)
family = test->cfg_l3_extra;
else if (test->cfg_l3_outer)
family = test->cfg_l3_outer;
else
family = test->cfg_l3_inner;
fd = socket(family, SOCK_RAW, IPPROTO_RAW);
if (!ASSERT_OK_FD(fd, "setup tx socket"))
return fd;
if (test->cfg_l3_extra) {
if (test->cfg_l3_extra == PF_INET)
ret = connect(fd, (void *)&test->extra_daddr4,
sizeof(test->extra_daddr4));
else
ret = connect(fd, (void *)&test->extra_daddr6,
sizeof(test->extra_daddr6));
if (!ASSERT_OK(ret, "connect")) {
close(fd);
return ret;
}
} else if (test->cfg_l3_outer) {
/* connect to destination if not encapsulated */
if (test->cfg_l3_outer == PF_INET)
ret = connect(fd, (void *)&test->out_daddr4,
sizeof(test->out_daddr4));
else
ret = connect(fd, (void *)&test->out_daddr6,
sizeof(test->out_daddr6));
if (!ASSERT_OK(ret, "connect")) {
close(fd);
return ret;
}
} else {
/* otherwise using loopback */
if (test->cfg_l3_inner == PF_INET)
ret = connect(fd, (void *)&test->in_daddr4,
sizeof(test->in_daddr4));
else
ret = connect(fd, (void *)&test->in_daddr6,
sizeof(test->in_daddr6));
if (!ASSERT_OK(ret, "connect")) {
close(fd);
return ret;
}
}
return fd;
}
/* receiver reads unencapsulated UDP */
static int setup_rx(const struct test_configuration *test)
{
int fd, ret;
fd = socket(test->cfg_l3_inner, SOCK_DGRAM, 0);
if (!ASSERT_OK_FD(fd, "socket rx"))
return fd;
if (test->cfg_l3_inner == PF_INET)
ret = bind(fd, (void *)&test->in_daddr4,
sizeof(test->in_daddr4));
else
ret = bind(fd, (void *)&test->in_daddr6,
sizeof(test->in_daddr6));
if (!ASSERT_OK(ret, "bind rx")) {
close(fd);
return ret;
}
return fd;
}
static int do_tx(int fd, const char *pkt, int len)
{
int ret;
ret = write(fd, pkt, len);
return ret != len;
}
static int do_poll(int fd, short events, int timeout)
{
struct pollfd pfd;
int ret;
pfd.fd = fd;
pfd.events = events;
ret = poll(&pfd, 1, timeout);
return ret;
}
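/* Drain all queued datagrams without blocking; recv() of one byte
* consumes a whole datagram, so each iteration accounts for one test
* packet and checks its payload pattern. Returns the count, -1 on error.
*/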
static int do_rx(int fd)
{
char rbuf;
int ret, num = 0;
while (1) {
ret = recv(fd, &rbuf, 1, MSG_DONTWAIT);
if (ret == -1 && errno == EAGAIN)
break;
if (ret < 0)
return -1;
if (!ASSERT_EQ(rbuf, TEST_PACKET_PATTERN, "check pkt pattern"))
return -1;
num++;
}
return num;
}
static int run_test(const struct test_configuration *test,
int source_port_index)
{
int fdt = -1, fdr = -1, len, tx = 0, rx = 0, err;
unsigned long tstop, tcur;
fdr = setup_rx(test);
fdt = setup_tx(test);
if (!ASSERT_OK_FD(fdr, "setup rx") || !ASSERT_OK_FD(fdt, "setup tx")) {
err = -1;
goto out_close_sockets;
}
len = build_packet(test,
(uint16_t)test->source_ports[source_port_index]);
if (!ASSERT_GT(len, 0, "build test packet")) {
close(fdt);
close(fdr);
return -1;
}
tcur = util_gettime();
tstop = tcur;
while (tx < TEST_PACKETS_COUNT) {
if (!ASSERT_OK(do_tx(fdt, buf, len), "do_tx"))
break;
tx++;
err = do_rx(fdr);
if (!ASSERT_GE(err, 0, "do_rx"))
break;
rx += err;
}
/* read straggler packets, if any */
if (rx < tx) {
tstop = util_gettime() + 100;
while (rx < tx) {
tcur = util_gettime();
if (tcur >= tstop)
break;
err = do_poll(fdr, POLLIN, tstop - tcur);
if (err < 0)
break;
err = do_rx(fdr);
if (err >= 0)
rx += err;
}
}
out_close_sockets:
close(fdt);
close(fdr);
return rx;
}
static int attach_and_configure_program(struct bpf_flow *skel)
{
struct bpf_map *prog_array = skel->maps.jmp_table;
int main_prog_fd, sub_prog_fd, map_fd, i, err;
struct bpf_program *prog;
char prog_name[32];
main_prog_fd = bpf_program__fd(skel->progs._dissect);
if (main_prog_fd < 0)
return main_prog_fd;
err = bpf_prog_attach(main_prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
if (err)
return err;
map_fd = bpf_map__fd(prog_array);
if (map_fd < 0)
return map_fd;
for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
if (!prog)
return -1;
sub_prog_fd = bpf_program__fd(prog);
if (sub_prog_fd < 0)
return -1;
err = bpf_map_update_elem(map_fd, &i, &sub_prog_fd, BPF_ANY);
if (err)
return -1;
}
return main_prog_fd;
}
static void detach_program(struct bpf_flow *skel, int prog_fd)
{
bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
}
static int set_port_drop(int pf, bool multi_port)
{
SYS(fail, "tc qdisc add dev lo ingress");
SYS(fail_delete_qdisc, "tc filter add %s %s %s %s %s %s %s %s %s %s",
"dev lo",
"parent FFFF:",
"protocol", pf == PF_INET6 ? "ipv6" : "ip",
"pref 1337",
"flower",
"ip_proto udp",
"src_port", multi_port ? "8-10" : "9",
"action drop");
return 0;
fail_delete_qdisc:
SYS_NOFAIL("tc qdisc del dev lo ingress");
fail:
return 1;
}
static void remove_filter(void)
{
SYS_NOFAIL("tc filter del dev lo ingress");
SYS_NOFAIL("tc qdisc del dev lo ingress");
}
static int ipv4_setup(void)
{
return set_port_drop(PF_INET, false);
}
static int ipv6_setup(void)
{
return set_port_drop(PF_INET6, false);
}
static int port_range_setup(void)
{
return set_port_drop(PF_INET, true);
}
static int set_addresses(void)
{
SYS(out, "ip -4 addr add %s dev lo", TEST_IPV4);
SYS(out_remove_ipv4, "ip -6 addr add %s dev lo", TEST_IPV6);
return 0;
out_remove_ipv4:
SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
out:
return -1;
}
static void unset_addresses(void)
{
SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4);
SYS_NOFAIL("ip -6 addr del %s dev lo", TEST_IPV6);
}
static int ipip_setup(void)
{
if (!ASSERT_OK(set_addresses(), "configure addresses"))
return -1;
if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
goto out_unset_addresses;
SYS(out_remove_filter,
"ip link add ipip_test type ipip remote %s local %s dev lo",
TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
SYS(out_clean_netif, "ip link set ipip_test up");
return 0;
out_clean_netif:
SYS_NOFAIL("ip link del ipip_test");
out_remove_filter:
remove_filter();
out_unset_addresses:
unset_addresses();
return -1;
}
static void ipip_shutdown(void)
{
SYS_NOFAIL("ip link del ipip_test");
remove_filter();
unset_addresses();
}
static int gre_setup(void)
{
if (!ASSERT_OK(set_addresses(), "configure addresses"))
return -1;
if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter"))
goto out_unset_addresses;
SYS(out_remove_filter,
"ip link add gre_test type gre remote %s local %s dev lo",
TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL);
SYS(out_clean_netif, "ip link set gre_test up");
return 0;
out_clean_netif:
SYS_NOFAIL("ip link del ipip_test");
out_remove_filter:
remove_filter();
out_unset_addresses:
unset_addresses();
return -1;
}
static void gre_shutdown(void)
{
SYS_NOFAIL("ip link del gre_test");
remove_filter();
unset_addresses();
}
static const struct test_configuration tests_input[] = {
{
.name = "ipv4",
.test_setup = ipv4_setup,
.test_teardown = remove_filter,
.source_ports = { 8, 9, 10 },
.cfg_l3_inner = PF_INET,
.in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT
},
{
.name = "ipv4_continue_dissect",
.test_setup = ipv4_setup,
.test_teardown = remove_filter,
.source_ports = { 8, 9, 10 },
.cfg_l3_inner = PF_INET,
.in_saddr4 = TEST_IN4_SRC_ADDR_DISSECT_CONTINUE,
.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT },
{
.name = "ipip",
.test_setup = ipip_setup,
.test_teardown = ipip_shutdown,
.source_ports = { 8, 9, 10 },
.cfg_l3_inner = PF_INET,
.in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
.in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
.out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
.out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
.cfg_l3_outer = PF_INET,
.cfg_encap_proto = IPPROTO_IPIP,
},
{
.name = "gre",
.test_setup = gre_setup,
.test_teardown = gre_shutdown,
.source_ports = { 8, 9, 10 },
.cfg_l3_inner = PF_INET,
.in_saddr4 = TEST_IN4_SRC_ADDR_IPIP,
.in_daddr4 = TEST_IN4_DST_ADDR_IPIP,
.out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT,
.out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT,
.cfg_l3_outer = PF_INET,
.cfg_encap_proto = IPPROTO_GRE,
},
{
.name = "port_range",
.test_setup = port_range_setup,
.test_teardown = remove_filter,
.source_ports = { 7, 9, 11 },
.cfg_l3_inner = PF_INET,
.in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT,
.in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT },
{
.name = "ipv6",
.test_setup = ipv6_setup,
.test_teardown = remove_filter,
.source_ports = { 8, 9, 10 },
.cfg_l3_inner = PF_INET6,
.in_saddr6 = TEST_IN6_SRC_ADDR_DEFAULT,
.in_daddr6 = TEST_IN6_DST_ADDR_DEFAULT
},
};
struct test_ctx {
struct bpf_flow *skel;
struct netns_obj *ns;
int prog_fd;
};
static int test_global_init(struct test_ctx *ctx)
{
int err;
ctx->skel = bpf_flow__open_and_load();
if (!ASSERT_OK_PTR(ctx->skel, "open and load flow_dissector"))
return -1;
ctx->ns = netns_new("flow_dissector_classification", true);
if (!ASSERT_OK_PTR(ctx->ns, "switch ns"))
goto out_destroy_skel;
err = write_sysctl("/proc/sys/net/ipv4/conf/default/rp_filter", "0");
err |= write_sysctl("/proc/sys/net/ipv4/conf/all/rp_filter", "0");
err |= write_sysctl("/proc/sys/net/ipv4/conf/lo/rp_filter", "0");
if (!ASSERT_OK(err, "configure net tunables"))
goto out_clean_ns;
ctx->prog_fd = attach_and_configure_program(ctx->skel);
if (!ASSERT_OK_FD(ctx->prog_fd, "attach and configure program"))
goto out_clean_ns;
return 0;
out_clean_ns:
netns_free(ctx->ns);
out_destroy_skel:
bpf_flow__destroy(ctx->skel);
return -1;
}
static void test_global_shutdown(struct test_ctx *ctx)
{
detach_program(ctx->skel, ctx->prog_fd);
netns_free(ctx->ns);
bpf_flow__destroy(ctx->skel);
}
void test_flow_dissector_classification(void)
{
struct test_ctx ctx;
const struct test_configuration *test;
int i;
if (test_global_init(&ctx))
return;
for (i = 0; i < ARRAY_SIZE(tests_input); i++) {
if (!test__start_subtest(tests_input[i].name))
continue;
test = &tests_input[i];
/* All tests are expected to have one rx-ok port first,
* then a non-working rx port, and finally an rx-ok port.
*/
if (test->test_setup &&
!ASSERT_OK(test->test_setup(), "init filter"))
continue;
ASSERT_EQ(run_test(test, 0), TEST_PACKETS_COUNT,
"test first port");
ASSERT_EQ(run_test(test, 1), 0, "test second port");
ASSERT_EQ(run_test(test, 2), TEST_PACKETS_COUNT,
"test third port");
if (test->test_teardown)
test->test_teardown();
}
test_global_shutdown(&ctx);
}

View File

@ -85,6 +85,7 @@ static void test_missed_kprobe_recursion(void)
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test6)), 1, "test6_recursion_misses");
cleanup:
missed_kprobe_recursion__destroy(skel);

View File

@ -2,7 +2,7 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
#include <linux/tcp.h>
#include <netinet/tcp.h>
#include <linux/netlink.h>
#include "sockopt_sk.skel.h"

View File

@ -98,6 +98,7 @@
#include "verifier_xdp_direct_packet_access.skel.h"
#include "verifier_bits_iter.skel.h"
#include "verifier_lsm.skel.h"
#include "irq.skel.h"
#define MAX_ENTRIES 11
@ -225,6 +226,7 @@ void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
void test_verifier_lsm(void) { RUN(verifier_lsm); }
void test_irq(void) { RUN(irq); }
void test_verifier_mtu(void) { RUN(verifier_mtu); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)

View File

@ -17,7 +17,7 @@
#include "network_helpers.h"
#include <linux/if_bonding.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <netinet/udp.h>
#include <uapi/linux/netdev.h>
#include "xdp_dummy.skel.h"

View File

@ -7,7 +7,7 @@
#include <linux/if_link.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <netinet/udp.h>
#include <bpf/bpf_endian.h>
#include <uapi/linux/netdev.h>
#include "test_xdp_do_redirect.skel.h"

View File

@ -3,7 +3,7 @@
#include <network_helpers.h>
#include <bpf/btf.h>
#include <linux/if_link.h>
#include <linux/udp.h>
#include <netinet/udp.h>
#include <net/if.h>
#include <unistd.h>

View File

@ -10,7 +10,7 @@
#include <linux/errqueue.h>
#include <linux/if_link.h>
#include <linux/net_tstamp.h>
#include <linux/udp.h>
#include <netinet/udp.h>
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
@ -133,23 +133,6 @@ static void close_xsk(struct xsk *xsk)
munmap(xsk->umem_area, UMEM_SIZE);
}
static void ip_csum(struct iphdr *iph)
{
__u32 sum = 0;
__u16 *p;
int i;
iph->check = 0;
p = (void *)iph;
for (i = 0; i < sizeof(*iph) / sizeof(*p); i++)
sum += p[i];
while (sum >> 16)
sum = (sum & 0xffff) + (sum >> 16);
iph->check = ~sum;
}
static int generate_packet(struct xsk *xsk, __u16 dst_port)
{
struct xsk_tx_metadata *meta;
@ -192,7 +175,7 @@ static int generate_packet(struct xsk *xsk, __u16 dst_port)
iph->protocol = IPPROTO_UDP;
ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)");
ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)");
ip_csum(iph);
iph->check = build_ip_csum(iph);
udph->source = htons(UDP_SOURCE_PORT);
udph->dest = htons(dst_port);
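
The removed ip_csum() above implemented the standard RFC 1071 one's-complement checksum over the IPv4 header; the call site now delegates to a shared build_ip_csum() helper. A minimal sketch of what that helper presumably does, reconstructed from the removed code (only the build_ip_csum name is taken from the new call site; where the helper actually lives is an assumption):

/* Sketch reconstructed from the removed ip_csum(): 16-bit one's-complement
 * sum over the header words, folded and inverted, returned rather than
 * stored so the caller assigns iph->check itself. */
static __u16 build_ip_csum(struct iphdr *iph)
{
	__u32 sum = 0;
	__u16 *p = (void *)iph;
	int i;

	iph->check = 0;
	for (i = 0; i < sizeof(*iph) / sizeof(*p); i++)
		sum += p[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum;
}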

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr *ptr;

View File

@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
__u32 data_end;
SEC("cgroup_skb/ingress")
int direct_packet_access(struct __sk_buff *skb)
{
data_end = skb->data_end;
return 1;
}
char _license[] SEC("license") = "GPL";
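
This new cgroup_skb program just publishes skb->data_end through a global. A minimal, hypothetical sketch of how a test could drive it with standard libbpf skeleton conventions (assuming the source file is named direct_packet_access.c, so the generated skeleton header is direct_packet_access.skel.h; the actual prog_tests harness is not shown in this diff):

#include <bpf/libbpf.h>
#include "direct_packet_access.skel.h"

static int run(int cgroup_fd)
{
	struct direct_packet_access *skel;
	struct bpf_link *link;
	int err = -1;

	skel = direct_packet_access__open_and_load();
	if (!skel)
		return -1;
	link = bpf_program__attach_cgroup(skel->progs.direct_packet_access,
					  cgroup_fd);
	if (!link)
		goto out;
	/* ... send a packet through the cgroup, then verify that
	 * skel->bss->data_end was populated by the program ... */
	err = skel->bss->data_end ? 0 : -1;
	bpf_link__destroy(link);
out:
	direct_packet_access__destroy(skel);
	return err;
}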

View File

@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
int reject_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();
@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region")
int reject_subprog_with_rcu_read_lock(void *ctx)
{
bpf_rcu_read_lock();

View File

@ -0,0 +1,444 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
unsigned long global_flags;
extern void bpf_local_irq_save(unsigned long *) __weak __ksym;
extern void bpf_local_irq_restore(unsigned long *) __weak __ksym;
extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
SEC("?tc")
__failure __msg("arg#0 doesn't point to an irq flag on stack")
int irq_save_bad_arg(struct __sk_buff *ctx)
{
bpf_local_irq_save(&global_flags);
return 0;
}
SEC("?tc")
__failure __msg("arg#0 doesn't point to an irq flag on stack")
int irq_restore_bad_arg(struct __sk_buff *ctx)
{
bpf_local_irq_restore(&global_flags);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_2(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags2);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_3(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags2);
bpf_local_irq_save(&flags3);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_3_minus_2(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags2);
bpf_local_irq_save(&flags3);
bpf_local_irq_restore(&flags3);
bpf_local_irq_restore(&flags2);
return 0;
}
static __noinline void local_irq_save(unsigned long *flags)
{
bpf_local_irq_save(flags);
}
static __noinline void local_irq_restore(unsigned long *flags)
{
bpf_local_irq_restore(flags);
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_1_subprog(struct __sk_buff *ctx)
{
unsigned long flags;
local_irq_save(&flags);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_2_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
local_irq_save(&flags1);
local_irq_save(&flags2);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_3_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save(&flags1);
local_irq_save(&flags2);
local_irq_save(&flags3);
return 0;
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region")
int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save(&flags1);
local_irq_save(&flags2);
local_irq_save(&flags3);
local_irq_restore(&flags3);
local_irq_restore(&flags2);
return 0;
}
SEC("?tc")
__success
int irq_balance(struct __sk_buff *ctx)
{
unsigned long flags;
local_irq_save(&flags);
local_irq_restore(&flags);
return 0;
}
SEC("?tc")
__success
int irq_balance_n(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save(&flags1);
local_irq_save(&flags2);
local_irq_save(&flags3);
local_irq_restore(&flags3);
local_irq_restore(&flags2);
local_irq_restore(&flags1);
return 0;
}
static __noinline void local_irq_balance(void)
{
unsigned long flags;
local_irq_save(&flags);
local_irq_restore(&flags);
}
static __noinline void local_irq_balance_n(void)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save(&flags1);
local_irq_save(&flags2);
local_irq_save(&flags3);
local_irq_restore(&flags3);
local_irq_restore(&flags2);
local_irq_restore(&flags1);
}
SEC("?tc")
__success
int irq_balance_subprog(struct __sk_buff *ctx)
{
local_irq_balance();
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("sleepable helper bpf_copy_from_user#")
int irq_sleepable_helper(void *ctx)
{
unsigned long flags;
u32 data;
local_irq_save(&flags);
bpf_copy_from_user(&data, sizeof(data), NULL);
local_irq_restore(&flags);
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region")
int irq_sleepable_kfunc(void *ctx)
{
unsigned long flags;
u32 data;
local_irq_save(&flags);
bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
local_irq_restore(&flags);
return 0;
}
int __noinline global_local_irq_balance(void)
{
local_irq_balance_n();
return 0;
}
SEC("?tc")
__failure __msg("global function calls are not allowed with IRQs disabled")
int irq_global_subprog(struct __sk_buff *ctx)
{
unsigned long flags;
bpf_local_irq_save(&flags);
global_local_irq_balance();
bpf_local_irq_restore(&flags);
return 0;
}
SEC("?tc")
__failure __msg("cannot restore irq state out of order")
int irq_restore_ooo(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags2);
bpf_local_irq_restore(&flags1);
bpf_local_irq_restore(&flags2);
return 0;
}
SEC("?tc")
__failure __msg("cannot restore irq state out of order")
int irq_restore_ooo_3(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags2);
bpf_local_irq_restore(&flags2);
bpf_local_irq_save(&flags3);
bpf_local_irq_restore(&flags1);
bpf_local_irq_restore(&flags3);
return 0;
}
static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2,
unsigned long *flags3)
{
local_irq_save(flags1);
local_irq_save(flags2);
local_irq_save(flags3);
}
SEC("?tc")
__success
int irq_restore_3_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save_3(&flags1, &flags2, &flags3);
bpf_local_irq_restore(&flags3);
bpf_local_irq_restore(&flags2);
bpf_local_irq_restore(&flags1);
return 0;
}
SEC("?tc")
__failure __msg("cannot restore irq state out of order")
int irq_restore_4_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
unsigned long flags4;
local_irq_save_3(&flags1, &flags2, &flags3);
bpf_local_irq_restore(&flags3);
bpf_local_irq_save(&flags4);
bpf_local_irq_restore(&flags4);
bpf_local_irq_restore(&flags1);
return 0;
}
SEC("?tc")
__failure __msg("cannot restore irq state out of order")
int irq_restore_ooo_3_subprog(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags2;
unsigned long flags3;
local_irq_save_3(&flags1, &flags2, &flags3);
bpf_local_irq_restore(&flags3);
bpf_local_irq_restore(&flags2);
bpf_local_irq_save(&flags3);
bpf_local_irq_restore(&flags1);
return 0;
}
SEC("?tc")
__failure __msg("expected an initialized")
int irq_restore_invalid(struct __sk_buff *ctx)
{
unsigned long flags1;
unsigned long flags = 0xfaceb00c;
bpf_local_irq_save(&flags1);
bpf_local_irq_restore(&flags);
return 0;
}
SEC("?tc")
__failure __msg("expected uninitialized")
int irq_save_invalid(struct __sk_buff *ctx)
{
unsigned long flags1;
bpf_local_irq_save(&flags1);
bpf_local_irq_save(&flags1);
return 0;
}
SEC("?tc")
__failure __msg("expected an initialized")
int irq_restore_iter(struct __sk_buff *ctx)
{
struct bpf_iter_num it;
bpf_iter_num_new(&it, 0, 42);
bpf_local_irq_restore((unsigned long *)&it);
return 0;
}
SEC("?tc")
__failure __msg("Unreleased reference id=1")
int irq_save_iter(struct __sk_buff *ctx)
{
struct bpf_iter_num it;
/* Ensure the same-sized slot has st->ref_obj_id set, so we reject based on
* slot_type != STACK_IRQ_FLAG...
*/
_Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size");
bpf_iter_num_new(&it, 0, 42);
bpf_local_irq_save((unsigned long *)&it);
bpf_local_irq_restore((unsigned long *)&it);
return 0;
}
SEC("?tc")
__failure __msg("expected an initialized")
int irq_flag_overwrite(struct __sk_buff *ctx)
{
unsigned long flags;
bpf_local_irq_save(&flags);
flags = 0xdeadbeef;
bpf_local_irq_restore(&flags);
return 0;
}
SEC("?tc")
__failure __msg("expected an initialized")
int irq_flag_overwrite_partial(struct __sk_buff *ctx)
{
unsigned long flags;
bpf_local_irq_save(&flags);
*(((char *)&flags) + 1) = 0xff;
bpf_local_irq_restore(&flags);
return 0;
}
SEC("?tc")
__failure __msg("cannot restore irq state out of order")
int irq_ooo_refs_array(struct __sk_buff *ctx)
{
unsigned long flags[4];
struct { int i; } *p;
/* refs=1 */
bpf_local_irq_save(&flags[0]);
/* refs=1,2 */
p = bpf_obj_new(typeof(*p));
if (!p) {
bpf_local_irq_restore(&flags[0]);
return 0;
}
/* refs=1,2,3 */
bpf_local_irq_save(&flags[1]);
/* refs=1,2,3,4 */
bpf_local_irq_save(&flags[2]);
/* Now when we remove ref=2, the verifier must not break the ordering in
* the refs array between 1,3,4. With an older implementation, the
* verifier would swap the last element with the removed element, but to
* maintain the stack property we need to use memmove.
*/
bpf_obj_drop(p);
/* Save and restore to reset active_irq_id to 3, as the ordering is now
* refs=1,4,3. When restoring the linear scan will find prev_id in order
* as 3 instead of 4.
*/
bpf_local_irq_save(&flags[3]);
bpf_local_irq_restore(&flags[3]);
/* With the incorrect implementation, we can release flags[1], flags[2],
* and flags[0], i.e. in the wrong order.
*/
bpf_local_irq_restore(&flags[1]);
bpf_local_irq_restore(&flags[2]);
bpf_local_irq_restore(&flags[0]);
return 0;
}
char _license[] SEC("license") = "GPL";
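
The comments in irq_ooo_refs_array above pin down a verifier invariant: releasing a reference from the middle of the acquired-references array must not reorder the remaining entries, since out-of-order irq restores are caught by scanning that array in acquisition order. A minimal sketch of the two release strategies being contrasted (types and helper names here are hypothetical, not the verifier's actual code):

#include <string.h>

struct ref_state { int id; /* ... */ };

/* Old approach: O(1) swap-with-last. Releasing a middle entry reorders
 * the array, so a later linear scan sees acquisition order 1,4,3 instead
 * of 1,3,4 and the LIFO check for irq restores breaks. */
static void release_ref_swap(struct ref_state *refs, int *cnt, int idx)
{
	refs[idx] = refs[--(*cnt)];
}

/* Fixed approach: shift the tail down with memmove, preserving the
 * stack (acquisition) order that out-of-order detection relies on. */
static void release_ref_memmove(struct ref_state *refs, int *cnt, int idx)
{
	memmove(&refs[idx], &refs[idx + 1],
		((*cnt) - idx - 1) * sizeof(refs[0]));
	(*cnt)--;
}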

View File

@ -4,7 +4,7 @@
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
static struct prog_test_ref_kfunc __kptr *v;
long total_sum = -1;

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_destructive_test(void)

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
struct syscall_test_args {
__u8 data[16];

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_fail(struct __sk_buff *ctx)

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("tc")
int kfunc_call_test4(struct __sk_buff *skb)

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
extern const int bpf_prog_active __ksym;
int active_res = -1;

View File

@ -6,7 +6,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "../bpf_experimental.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
struct plain_local;

View File

@ -2,7 +2,7 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;

View File

@ -4,7 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
struct map_value {
char buf[8];

View File

@ -2,7 +2,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
@ -46,3 +46,9 @@ int test5(struct pt_regs *ctx)
{
return 0;
}
SEC("kprobe.session/bpf_kfunc_common_test")
int test6(struct pt_regs *ctx)
{
return 0;
}

View File

@ -4,7 +4,7 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -5,8 +5,10 @@
#include "bpf_misc.h"
#include "bpf_experimental.h"
extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym;
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@ -14,7 +16,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@ -23,7 +25,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@ -33,7 +35,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
{
bpf_preempt_disable();
@ -55,7 +57,7 @@ static __noinline void preempt_enable(void)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@ -63,7 +65,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@ -72,7 +74,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
}
SEC("?tc")
__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
{
preempt_disable();
@ -113,6 +115,18 @@ int preempt_sleepable_helper(void *ctx)
return 0;
}
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region")
int preempt_sleepable_kfunc(void *ctx)
{
u32 data;
bpf_preempt_disable();
bpf_copy_from_user_str(&data, sizeof(data), NULL, 0);
bpf_preempt_enable();
return 0;
}
int __noinline preempt_global_subprog(void)
{
preempt_balance_subprog();

View File

@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -4,8 +4,8 @@
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Google LLC */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
#include "../test_kmods/bpf_testmod_kfunc.h"
SEC("syscall")
int init_sock(struct init_sock_args *args)

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -2,7 +2,7 @@
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

View File

@ -3,7 +3,7 @@
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
#include "../test_kmods/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

Some files were not shown because too many files have changed in this diff.