Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-11-01

We've added 181 non-merge commits during the last 28 day(s) which contain
a total of 280 files changed, 11791 insertions(+), 5879 deletions(-).

The main changes are:

1) Fix bpf verifier propagation of 64-bit bounds, from Alexei.
2) Parallelize bpf test_progs, from Yucong and Andrii.
3) Deprecate various libbpf apis including af_xdp, from Andrii, Hengqi, Magnus.
4) Improve bpf selftests on s390, from Ilya.
5) bloomfilter bpf map type, from Joanne.
6) Big improvements to JIT tests especially on Mips, from Johan.
7) Support kernel module function calls from bpf, from Kumar.
8) Support typeless and weak ksym in light skeleton, from Kumar.
9) Disallow unprivileged bpf by default, from Pawan.
10) BTF_KIND_DECL_TAG support, from Yonghong.
11) Various bpftool cleanups, from Quentin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (181 commits)
  libbpf: Deprecate AF_XDP support
  kbuild: Unify options for BTF generation for vmlinux and modules
  selftests/bpf: Add a testcase for 64-bit bounds propagation issue.
  bpf: Fix propagation of signed bounds from 64-bit min/max into 32-bit.
  bpf: Fix propagation of bounds from 64-bit min/max into 32-bit and var_off.
  selftests/bpf: Fix also no-alu32 strobemeta selftest
  bpf: Add missing map_delete_elem method to bloom filter map
  selftests/bpf: Add bloom map success test for userspace calls
  bpf: Add alignment padding for "map_extra" + consolidate holes
  bpf: Bloom filter map naming fixups
  selftests/bpf: Add test cases for struct_ops prog
  bpf: Add dummy BPF STRUCT_OPS for test purpose
  bpf: Factor out helpers for ctx access checking
  bpf: Factor out a helper to prepare trampoline for struct_ops prog
  selftests, bpf: Fix broken riscv build
  riscv, libbpf: Add RISC-V (RV64) support to bpf_tracing.h
  tools, build: Add RISC-V to HOSTARCH parsing
  riscv, bpf: Increase the maximum number of iterations
  selftests, bpf: Add one test for sockmap with strparser
  selftests, bpf: Fix test_txmsg_ingress_parser error
  ...
====================

Link: https://lore.kernel.org/r/20211102013123.9005-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in: commit b7b98f8689
@@ -85,7 +85,7 @@ sequentially and type id is assigned to each recognized type starting from id
 #define BTF_KIND_VAR            14      /* Variable     */
 #define BTF_KIND_DATASEC        15      /* Section      */
 #define BTF_KIND_FLOAT          16      /* Floating point */
-#define BTF_KIND_TAG            17      /* Tag */
+#define BTF_KIND_DECL_TAG       17      /* Decl Tag */

 Note that the type section encodes debug info, not just pure types.
 ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@@ -107,7 +107,7 @@ Each type contains the following common data::
  * "size" tells the size of the type it is describing.
  *
  * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and TAG.
+ * FUNC, FUNC_PROTO and DECL_TAG.
  * "type" is a type_id referring to another type.
  */
 union {
@@ -466,30 +466,30 @@ map definition.

 No additional type data follow ``btf_type``.

-2.2.17 BTF_KIND_TAG
-~~~~~~~~~~~~~~~~~~~
+2.2.17 BTF_KIND_DECL_TAG
+~~~~~~~~~~~~~~~~~~~~~~~~

 ``struct btf_type`` encoding requirement:
  * ``name_off``: offset to a non-empty string
  * ``info.kind_flag``: 0
- * ``info.kind``: BTF_KIND_TAG
+ * ``info.kind``: BTF_KIND_DECL_TAG
  * ``info.vlen``: 0
- * ``type``: ``struct``, ``union``, ``func`` or ``var``
+ * ``type``: ``struct``, ``union``, ``func``, ``var`` or ``typedef``

-``btf_type`` is followed by ``struct btf_tag``.::
+``btf_type`` is followed by ``struct btf_decl_tag``.::

-    struct btf_tag {
+    struct btf_decl_tag {
         __u32   component_idx;
     };

-The ``name_off`` encodes btf_tag attribute string.
-The ``type`` should be ``struct``, ``union``, ``func`` or ``var``.
-For ``var`` type, ``btf_tag.component_idx`` must be ``-1``.
-For the other three types, if the btf_tag attribute is
+The ``name_off`` encodes btf_decl_tag attribute string.
+The ``type`` should be ``struct``, ``union``, ``func``, ``var`` or ``typedef``.
+For ``var`` or ``typedef`` type, ``btf_decl_tag.component_idx`` must be ``-1``.
+For the other three types, if the btf_decl_tag attribute is
 applied to the ``struct``, ``union`` or ``func`` itself,
-``btf_tag.component_idx`` must be ``-1``. Otherwise,
+``btf_decl_tag.component_idx`` must be ``-1``. Otherwise,
 the attribute is applied to a ``struct``/``union`` member or
-a ``func`` argument, and ``btf_tag.component_idx`` should be a
+a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a
 valid index (starting from 0) pointing to a member or an argument.

 3. BTF Kernel API
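As an aside, the decl tags described above are typically produced from C source annotations; a minimal sketch, assuming a clang version with btf_decl_tag support (the tag strings here are made up, not part of this patch):

	#define __tag(x) __attribute__((btf_decl_tag(x)))

	struct pkt {
		int len __tag("len_field");	/* component_idx = 0 */
		void *data __tag("data_field");	/* component_idx = 1 */
	} __tag("pkt_struct");			/* component_idx = -1 */

Each annotation becomes one BTF_KIND_DECL_TAG entry whose ``type`` points at the struct and whose ``component_idx`` selects the member, per the encoding rules above.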
@@ -150,6 +150,46 @@ mirror of the mainline's version of libbpf for a stand-alone build.
 However, all changes to libbpf's code base must be upstreamed through
 the mainline kernel tree.


+API documentation convention
+============================
+
+The libbpf API is documented via comments above definitions in
+header files. These comments can be rendered by doxygen and sphinx
+for well-organized HTML output. This section describes the
+convention in which these comments should be formatted.
+
+Here is an example from btf.h:
+
+.. code-block:: c
+
+ /**
+  * @brief **btf__new()** creates a new instance of a BTF object from the raw
+  * bytes of an ELF's BTF section
+  * @param data raw bytes
+  * @param size number of bytes passed in `data`
+  * @return new BTF object instance which has to be eventually freed with
+  * **btf__free()**
+  *
+  * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
+  * error code from such a pointer `libbpf_get_error()` should be used. If
+  * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
+  * returned on error instead. In both cases thread-local `errno` variable is
+  * always set to error code as well.
+  */
+
+The comment must start with a block comment of the form '/\*\*'.
+
+The documentation always starts with a @brief directive. This line is a short
+description of the API. It starts with the name of the API, denoted in bold
+like so: **api_name**. Please include an open and close parenthesis if this is a
+function. Follow with a short description of the API. A longer-form description
+can be added below the last directive, at the bottom of the comment.
+
+Parameters are denoted with the @param directive; there should be one for each
+parameter. If this is a function with a non-void return, use the @return directive
+to document it.
+
 License
 -------------------
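A minimal usage sketch of the error convention documented in the comment above (illustrative only; parse_btf() is a hypothetical helper, and default non-strict libbpf error behavior is assumed):

	#include <stdio.h>
	#include <linux/types.h>
	#include <bpf/btf.h>
	#include <bpf/libbpf.h>

	static struct btf *parse_btf(const void *data, __u32 size)
	{
		struct btf *btf = btf__new(data, size);
		long err = libbpf_get_error(btf);

		if (err) {
			/* btf is an encoded error code, not a usable pointer */
			fprintf(stderr, "btf__new: %ld\n", err);
			return NULL;
		}
		return btf;	/* caller must btf__free() it */
	}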
@@ -3442,6 +3442,7 @@ S: Supported
 F: arch/arm64/net/

 BPF JIT for MIPS (32-BIT AND 64-BIT)
+M: Johan Almbladh <johan.almbladh@anyfinetworks.com>
 M: Paul Burton <paulburton@kernel.org>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
Makefile (3 changed lines)
@@ -480,6 +480,8 @@ LZ4 = lz4c
 XZ = xz
 ZSTD = zstd

+PAHOLE_FLAGS = $(shell PAHOLE=$(PAHOLE) $(srctree)/scripts/pahole-flags.sh)
+
 CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 	  -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
 NOSTDINC_FLAGS :=
@@ -534,6 +536,7 @@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+export PAHOLE_FLAGS

 # Files to ignore in find ... statements
|
@ -1882,11 +1882,6 @@ static int validate_code(struct jit_ctx *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bpf_jit_compile(struct bpf_prog *prog)
|
||||
{
|
||||
/* Nothing to do here. We support Internal BPF. */
|
||||
}
|
||||
|
||||
bool bpf_jit_needs_zext(void)
|
||||
{
|
||||
return true;
|
||||
|
@@ -57,7 +57,6 @@ config MIPS
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
 	select HAVE_ASM_MODVERSIONS
-	select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_TIF_NOHZ
 	select HAVE_C_RECORDMCOUNT
@@ -65,7 +64,10 @@ config MIPS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
+	select HAVE_EBPF_JIT if !CPU_MICROMIPS && \
+				!CPU_DADDI_WORKAROUNDS && \
+				!CPU_R4000_WORKAROUNDS && \
+				!CPU_R4400_WORKAROUNDS
 	select HAVE_EXIT_THREAD
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -1212,15 +1214,6 @@ config SYS_SUPPORTS_RELOCATABLE
 	  The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
 	  to allow access to command line and entropy sources.

-config MIPS_CBPF_JIT
-	def_bool y
-	depends on BPF_JIT && HAVE_CBPF_JIT
-
-config MIPS_EBPF_JIT
-	def_bool y
-	depends on BPF_JIT && HAVE_EBPF_JIT
-
-
 #
 # Endianness selection. Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices. Also the use of a
@@ -145,6 +145,7 @@ Ip_u1(_mtlo);
 Ip_u3u1u2(_mul);
 Ip_u1u2(_multu);
 Ip_u3u1u2(_mulu);
+Ip_u3u1u2(_muhu);
 Ip_u3u1u2(_nor);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
@@ -248,7 +249,11 @@ static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \
 #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
 #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
 #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
+#ifdef CONFIG_CPU_NOP_WORKAROUNDS
+#define uasm_i_nop(buf) uasm_i_or(buf, 1, 1, 0)
+#else
 #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
+#endif
 #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)

 static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
@@ -90,7 +90,7 @@ static const struct insn insn_table[insn_invalid] = {
 			RS | RT | RD},
 	[insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
 	[insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
-	[insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
+	[insn_dmulu] = {M(spec_op, 0, 0, 0, dmultu_dmulu_op, dmultu_op),
 			RS | RT | RD},
 	[insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
 	[insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
@@ -150,6 +150,8 @@ static const struct insn insn_table[insn_invalid] = {
 	[insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
 	[insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
 			RS | RT | RD},
+	[insn_muhu] = {M(spec_op, 0, 0, 0, multu_muhu_op, multu_op),
+			RS | RT | RD},
 #ifndef CONFIG_CPU_MIPSR6
 	[insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
 #else
@@ -59,7 +59,7 @@ enum opcode {
 	insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
 	insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
 	insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
-	insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
+	insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_muhu, insn_nor,
 	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
 	insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
 	insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
@@ -344,6 +344,7 @@ I_u1(_mtlo)
 I_u3u1u2(_mul)
 I_u1u2(_multu)
 I_u3u1u2(_mulu)
+I_u3u1u2(_muhu)
 I_u3u1u2(_nor)
 I_u3u1u2(_or)
 I_u2u1u3(_ori)
@@ -1,5 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # MIPS networking code

-obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
-obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+
+ifeq ($(CONFIG_32BIT),y)
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
+else
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
+endif
[File diff suppressed because it is too large]
@@ -1,81 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 */

#ifndef BPF_JIT_MIPS_OP_H
#define BPF_JIT_MIPS_OP_H

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_V0	2
#define MIPS_R_A0	4
#define MIPS_R_A1	5
#define MIPS_R_T4	12
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16
#define MIPS_R_S1	17
#define MIPS_R_S2	18
#define MIPS_R_S3	19
#define MIPS_R_S4	20
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* Conditional codes */
#define MIPS_COND_EQ	0x1
#define MIPS_COND_GE	(0x1 << 1)
#define MIPS_COND_GT	(0x1 << 2)
#define MIPS_COND_NE	(0x1 << 3)
#define MIPS_COND_ALL	(0x1 << 4)
/* Conditionals on X register or K immediate */
#define MIPS_COND_X	(0x1 << 5)
#define MIPS_COND_K	(0x1 << 6)

#define r_ret	MIPS_R_V0

/*
 * Use 2 scratch registers to avoid pipeline interlocks.
 * There is no overhead during epilogue and prologue since
 * any of the $s0-$s6 registers will only be preserved if
 * they are going to actually be used.
 */
#define r_skb_hl	MIPS_R_S0 /* skb header length */
#define r_skb_data	MIPS_R_S1 /* skb actual data */
#define r_off		MIPS_R_S2
#define r_A		MIPS_R_S3
#define r_X		MIPS_R_S4
#define r_skb		MIPS_R_S5
#define r_M		MIPS_R_S6
#define r_skb_len	MIPS_R_S7
#define r_s0		MIPS_R_T4 /* scratch reg 1 */
#define r_s1		MIPS_R_T5 /* scratch reg 2 */
#define r_tmp_imm	MIPS_R_T6 /* No need to preserve this */
#define r_tmp		MIPS_R_T7 /* No need to preserve this */
#define r_zero		MIPS_R_ZERO
#define r_sp		MIPS_R_SP
#define r_ra		MIPS_R_RA

#ifndef __ASSEMBLY__

/* Declare ASM helpers */

#define DECLARE_LOAD_FUNC(func) \
	extern u8 func(unsigned long *skb, int offset); \
	extern u8 func##_negative(unsigned long *skb, int offset); \
	extern u8 func##_positive(unsigned long *skb, int offset)

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);

#endif

#endif /* BPF_JIT_MIPS_OP_H */
@@ -1,285 +0,0 @@
/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/isa-rev.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */

#define skb	a0
#define offset	a1
#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */

/* We know better :) so prevent assembler reordering etc */
	.set noreorder

#define is_offset_negative(TYPE) \
	/* If offset is negative we have more work to do */ \
	slti	t0, offset, 0; \
	bgtz	t0, bpf_slow_path_##TYPE##_neg; \
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE) \
	/* Reading from header? */ \
	addiu	$r_s0, $r_skb_hl, -SIZE; \
	slt	t0, $r_s0, offset; \
	bgtz	t0, bpf_slow_path_##TYPE; \

LEAF(sk_load_word)
	is_offset_negative(word)
FEXPORT(sk_load_word_positive)
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	.set	reorder
	lw	$r_A, 0(t1)
	.set	noreorder
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_A, 24
	srl	t1, $r_A, 24
	srl	t2, $r_A, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_A, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	move	$r_ret, zero
END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
FEXPORT(sk_load_half_positive)
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lhu	$r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	$r_A, $r_A
# else
	sll	t0, $r_A, 8
	srl	t1, $r_A, 8
	andi	t0, t0, 0xff00
	or	$r_A, t0, t1
# endif
#endif
	jr	$r_ra
	move	$r_ret, zero
END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
FEXPORT(sk_load_byte_positive)
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lbu	$r_A, 0(t1)
	jr	$r_ra
	move	$r_ret, zero
END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 spaces for argument registers in case
 * the callee needs to use them. Even though we don't care about
 * the argument registers ourselves, we need to allocate that space
 * to remain ABI compliant since the callee may want to use that space.
 * We also allocate 2 more spaces for $r_ra and our return register (*to).
 *
 * n64 is a bit different. The *caller* will allocate the space to preserve
 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
 * good reason but it does not matter that much really.
 *
 * (void *to) is returned in r_s0
 *
 */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
#define bpf_slow_path_common(SIZE) \
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE; \
	sltu		$r_s0, offset, $r_s1; \
	beqz		$r_s0, fault; \
	/* Load 4th argument in DS */ \
	LONG_ADDIU	a3, zero, SIZE; \
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG); \
	PTR_LA		t0, skb_copy_bits; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp); \
	/* Assign low slot to a2 */ \
	PTR_ADDIU	a2, $r_sp, DS_OFFSET(SIZE); \
	jalr		t0; \
	/* Reset our destination slot (DS but it's ok) */ \
	INT_S		zero, (4 * SZREG)($r_sp); \
	/* \
	 * skb_copy_bits returns 0 on success and -EFAULT \
	 * on error. Our data live in a2. Do not bother with \
	 * our data if an error has been returned. \
	 */ \
	/* Restore our frame */ \
	PTR_L		$r_ra, (5 * SZREG)($r_sp); \
	INT_L		$r_s0, (4 * SZREG)($r_sp); \
	bltz		v0, fault; \
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG; \
	move		$r_ret, zero; \

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	wsbh	t0, $r_s0
	jr	$r_ra
	rotr	$r_A, t0, 16
# else
	sll	t0, $r_s0, 24
	srl	t1, $r_s0, 24
	srl	t2, $r_s0, 8
	or	t0, t0, t1
	andi	t2, t2, 0xff00
	andi	t1, $r_s0, 0xff00
	or	t0, t0, t2
	sll	t1, t1, 8
	jr	$r_ra
	or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	move	$r_A, $r_s0
#endif

END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if MIPS_ISA_REV >= 2
	jr	$r_ra
	wsbh	$r_A, $r_s0
# else
	sll	t0, $r_s0, 8
	andi	t1, $r_s0, 0xff00
	andi	t0, t0, 0xff00
	srl	t1, t1, 8
	jr	$r_ra
	or	$r_A, t0, t1
# endif
#else
	jr	$r_ra
	move	$r_A, $r_s0
#endif

END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	move	$r_A, $r_s0

END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
/*
 * call skb_copy_bits:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *						int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
#define bpf_negative_common(SIZE) \
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG); \
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp); \
	jalr		t0; \
	li		a2, SIZE; \
	PTR_L		$r_ra, (5 * SZREG)($r_sp); \
	/* Check return pointer */ \
	beqz		v0, fault; \
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG; \
	/* Preserve our pointer */ \
	move		$r_s0, v0; \
	/* Set return value */ \
	move		$r_ret, zero; \

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	lw	$r_A, 0($r_s0)
END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	lhu	$r_A, 0($r_s0)
END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	lbu	$r_A, 0($r_s0)
END(sk_load_byte_negative)

fault:
	jr	$r_ra
	addiu	$r_ret, zero, 1
arch/mips/net/bpf_jit_comp.c (new file, 1034 lines)
[File diff suppressed because it is too large]
arch/mips/net/bpf_jit_comp.h (new file, 235 lines)
@@ -0,0 +1,235 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#ifndef _BPF_JIT_COMP_H
#define _BPF_JIT_COMP_H

/* MIPS registers */
#define MIPS_R_ZERO	0	/* Const zero */
#define MIPS_R_AT	1	/* Asm temp   */
#define MIPS_R_V0	2	/* Result     */
#define MIPS_R_V1	3	/* Result     */
#define MIPS_R_A0	4	/* Argument   */
#define MIPS_R_A1	5	/* Argument   */
#define MIPS_R_A2	6	/* Argument   */
#define MIPS_R_A3	7	/* Argument   */
#define MIPS_R_A4	8	/* Arg (n64)  */
#define MIPS_R_A5	9	/* Arg (n64)  */
#define MIPS_R_A6	10	/* Arg (n64)  */
#define MIPS_R_A7	11	/* Arg (n64)  */
#define MIPS_R_T0	8	/* Temp (o32) */
#define MIPS_R_T1	9	/* Temp (o32) */
#define MIPS_R_T2	10	/* Temp (o32) */
#define MIPS_R_T3	11	/* Temp (o32) */
#define MIPS_R_T4	12	/* Temporary  */
#define MIPS_R_T5	13	/* Temporary  */
#define MIPS_R_T6	14	/* Temporary  */
#define MIPS_R_T7	15	/* Temporary  */
#define MIPS_R_S0	16	/* Saved      */
#define MIPS_R_S1	17	/* Saved      */
#define MIPS_R_S2	18	/* Saved      */
#define MIPS_R_S3	19	/* Saved      */
#define MIPS_R_S4	20	/* Saved      */
#define MIPS_R_S5	21	/* Saved      */
#define MIPS_R_S6	22	/* Saved      */
#define MIPS_R_S7	23	/* Saved      */
#define MIPS_R_T8	24	/* Temporary  */
#define MIPS_R_T9	25	/* Temporary  */
/*	MIPS_R_K0	26	   Reserved   */
/*	MIPS_R_K1	27	   Reserved   */
#define MIPS_R_GP	28	/* Global ptr */
#define MIPS_R_SP	29	/* Stack ptr  */
#define MIPS_R_FP	30	/* Frame ptr  */
#define MIPS_R_RA	31	/* Return     */

/*
 * Jump address mask for immediate jumps. The four most significant bits
 * must be equal to PC.
 */
#define MIPS_JMP_MASK	0x0fffffffUL

/* Maximum number of iterations in offset table computation */
#define JIT_MAX_ITERATIONS	8

/*
 * Jump pseudo-instructions used internally
 * for branch conversion and branch optimization.
 */
#define JIT_JNSET	0xe0
#define JIT_JNOP	0xf0

/* Descriptor flag for PC-relative branch conversion */
#define JIT_DESC_CONVERT	BIT(31)

/* JIT context for an eBPF program */
struct jit_context {
	struct bpf_prog *program;	/* The eBPF program being JITed        */
	u32 *descriptors;		/* eBPF to JITed CPU insn descriptors  */
	u32 *target;			/* JITed code buffer                   */
	u32 bpf_index;			/* Index of current BPF program insn   */
	u32 jit_index;			/* Index of current JIT target insn    */
	u32 changes;			/* Number of PC-relative branch conv   */
	u32 accessed;			/* Bit mask of read eBPF registers     */
	u32 clobbered;			/* Bit mask of modified CPU registers  */
	u32 stack_size;			/* Total allocated stack size in bytes */
	u32 saved_size;			/* Size of callee-saved registers      */
	u32 stack_used;			/* Stack size used for function calls  */
};

/* Emit the instruction if the JIT memory space has been allocated */
#define __emit(ctx, func, ...)					\
do {								\
	if ((ctx)->target != NULL) {				\
		u32 *p = &(ctx)->target[ctx->jit_index];	\
		uasm_i_##func(&p, ##__VA_ARGS__);		\
	}							\
	(ctx)->jit_index++;					\
} while (0)
#define emit(...) __emit(__VA_ARGS__)

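Because ctx->target is NULL during the sizing passes, the same call both measures and emits. A hypothetical use of the helper (the uasm opcode name and register constants follow asm/uasm.h and the definitions above; this line is not taken from the new JIT):

	/* dst = 1: addiu dst, zero, 1; only counted until target is allocated */
	emit(ctx, addiu, MIPS_R_T4, MIPS_R_ZERO, 1);
	clobber_reg(ctx, MIPS_R_T4);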
/* Workaround for R10000 ll/sc errata */
#ifdef CONFIG_WAR_R10000
#define LLSC_beqz	beqzl
#else
#define LLSC_beqz	beqz
#endif

/* Workaround for Loongson-3 ll/sc errata */
#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
#define LLSC_sync(ctx)	emit(ctx, sync, 0)
#define LLSC_offset	4
#else
#define LLSC_sync(ctx)
#define LLSC_offset	0
#endif

/* Workaround for Loongson-2F jump errata */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
#define JALR_MASK	0xffffffffcfffffffULL
#else
#define JALR_MASK	(~0ULL)
#endif

/*
 * Mark a BPF register as accessed, it needs to be
 * initialized by the program if expected, e.g. FP.
 */
static inline void access_reg(struct jit_context *ctx, u8 reg)
{
	ctx->accessed |= BIT(reg);
}

/*
 * Mark a CPU register as clobbered, it needs to be
 * saved/restored by the program if callee-saved.
 */
static inline void clobber_reg(struct jit_context *ctx, u8 reg)
{
	ctx->clobbered |= BIT(reg);
}

/*
 * Push registers on the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be written is returned.
 */
int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);

/*
 * Pop registers from the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be read is returned.
 */
int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);

/* Compute the 28-bit jump target address from a BPF program location */
int get_target(struct jit_context *ctx, u32 loc);

/* Compute the PC-relative offset to relative BPF program offset */
int get_offset(const struct jit_context *ctx, int off);

/* dst = imm (32-bit) */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm);

/* dst = src (32-bit) */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src);

/* Validate ALU/ALU64 immediate range */
bool valid_alu_i(u8 op, s32 imm);

/* Rewrite ALU/ALU64 immediate operation */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val);

/* ALU immediate operation (32-bit) */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op);

/* ALU register operation (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op);

/* Atomic read-modify-write (32-bit) */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code);

/* Atomic compare-and-exchange (32-bit) */
void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off);

/* Swap bytes and truncate a register word or half word */
void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width);

/* Validate JMP/JMP32 immediate range */
bool valid_jmp_i(u8 op, s32 imm);

/* Prepare a PC-relative jump operation with immediate conditional */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Prepare a PC-relative jump operation with register conditional */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);

/* Finish a PC-relative jump operation */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off);

/* Conditional JMP/JMP32 immediate */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op);

/* Conditional JMP/JMP32 register */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op);

/* Jump always */
int emit_ja(struct jit_context *ctx, s16 off);

/* Jump to epilogue */
int emit_exit(struct jit_context *ctx);

/*
 * Build program prologue to set up the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_prologue(struct jit_context *ctx);

/*
 * Build the program epilogue to restore the stack and registers.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
void build_epilogue(struct jit_context *ctx, int dest_reg);

/*
 * Convert an eBPF instruction to native instruction, i.e
 * JITs an eBPF instruction.
 * Returns:
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 * This function is implemented separately for 32-bit and 64-bit JITs.
 */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx);

#endif /* _BPF_JIT_COMP_H */
arch/mips/net/bpf_jit_comp32.c (new file, 1899 lines)
[File diff suppressed because it is too large]
arch/mips/net/bpf_jit_comp64.c (new file, 1060 lines)
[File diff suppressed because it is too large]
[File diff suppressed because it is too large]
@@ -11,14 +11,23 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>

+#ifdef CONFIG_BPF_JIT
+int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs);
+#endif
+
 int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;

 	fixup = search_exception_tables(regs->epc);
-	if (fixup) {
-		regs->epc = fixup->fixup;
-		return 1;
-	}
-	return 0;
+	if (!fixup)
+		return 0;
+
+#ifdef CONFIG_BPF_JIT
+	if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END)
+		return rv_bpf_fixup_exception(fixup, regs);
+#endif
+
+	regs->epc = fixup->fixup;
+	return 1;
 }
@@ -71,6 +71,7 @@ struct rv_jit_context {
 	int ninsns;
 	int epilogue_offset;
 	int *offset;		/* BPF to RV */
+	int nexentries;
 	unsigned long flags;
 	int stack_size;
 };
@@ -5,6 +5,7 @@
 *
 */

+#include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include "bpf_jit.h"
@@ -27,6 +28,21 @@ static const int regmap[] = {
 	[BPF_REG_AX] =	RV_REG_T0,
 };

+static const int pt_regmap[] = {
+	[RV_REG_A0] = offsetof(struct pt_regs, a0),
+	[RV_REG_A1] = offsetof(struct pt_regs, a1),
+	[RV_REG_A2] = offsetof(struct pt_regs, a2),
+	[RV_REG_A3] = offsetof(struct pt_regs, a3),
+	[RV_REG_A4] = offsetof(struct pt_regs, a4),
+	[RV_REG_A5] = offsetof(struct pt_regs, a5),
+	[RV_REG_S1] = offsetof(struct pt_regs, s1),
+	[RV_REG_S2] = offsetof(struct pt_regs, s2),
+	[RV_REG_S3] = offsetof(struct pt_regs, s3),
+	[RV_REG_S4] = offsetof(struct pt_regs, s4),
+	[RV_REG_S5] = offsetof(struct pt_regs, s5),
+	[RV_REG_T0] = offsetof(struct pt_regs, t0),
+};
+
 enum {
 	RV_CTX_F_SEEN_TAIL_CALL =	0,
 	RV_CTX_F_SEEN_CALL =		RV_REG_RA,
@@ -440,6 +456,69 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
 	return 0;
 }

+#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
+#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
+
+int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
+			   struct pt_regs *regs)
+{
+	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
+	int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
+
+	*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
+	regs->epc = (unsigned long)&ex->fixup - offset;
+
+	return 1;
+}
+
+/* For accesses to BTF pointers, add an entry to the exception table */
+static int add_exception_handler(const struct bpf_insn *insn,
+				 struct rv_jit_context *ctx,
+				 int dst_reg, int insn_len)
+{
+	struct exception_table_entry *ex;
+	unsigned long pc;
+	off_t offset;
+
+	if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
+		return 0;
+
+	if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(insn_len > ctx->ninsns))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
+		return -EINVAL;
+
+	ex = &ctx->prog->aux->extable[ctx->nexentries];
+	pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len];
+
+	offset = pc - (long)&ex->insn;
+	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+		return -ERANGE;
+	ex->insn = pc;
+
+	/*
+	 * Since the extable follows the program, the fixup offset is always
+	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
+	 * to keep things simple, and put the destination register in the upper
+	 * bits. We don't need to worry about buildtime or runtime sort
+	 * modifying the upper bits because the table is already sorted, and
+	 * isn't part of the main exception table.
+	 */
+	offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
+	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+		return -ERANGE;
+
+	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+		FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
+
+	ctx->nexentries++;
+	return 0;
+}
+
 int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		      bool extra_pass)
 {
@@ -893,52 +972,86 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,

 	/* LDX: dst = *(size *)(src + off) */
 	case BPF_LDX | BPF_MEM | BPF_B:
-		if (is_12b_int(off)) {
-			emit(rv_lbu(rd, off, rs), ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
-		emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
-		if (insn_is_zext(&insn[1]))
-			return 1;
-		break;
 	case BPF_LDX | BPF_MEM | BPF_H:
-		if (is_12b_int(off)) {
-			emit(rv_lhu(rd, off, rs), ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
-		emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
-		if (insn_is_zext(&insn[1]))
-			return 1;
-		break;
 	case BPF_LDX | BPF_MEM | BPF_W:
-		if (is_12b_int(off)) {
-			emit(rv_lwu(rd, off, rs), ctx);
-			break;
-		}
-
-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
-		emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
-		if (insn_is_zext(&insn[1]))
-			return 1;
-		break;
 	case BPF_LDX | BPF_MEM | BPF_DW:
-		if (is_12b_int(off)) {
-			emit_ld(rd, off, rs, ctx);
+	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+	{
+		int insn_len, insns_start;
+
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			if (is_12b_int(off)) {
+				insns_start = ctx->ninsns;
+				emit(rv_lbu(rd, off, rs), ctx);
+				insn_len = ctx->ninsns - insns_start;
+				break;
+			}
+
+			emit_imm(RV_REG_T1, off, ctx);
+			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+			insns_start = ctx->ninsns;
+			emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+			insn_len = ctx->ninsns - insns_start;
+			if (insn_is_zext(&insn[1]))
+				return 1;
+			break;
+		case BPF_H:
+			if (is_12b_int(off)) {
+				insns_start = ctx->ninsns;
+				emit(rv_lhu(rd, off, rs), ctx);
+				insn_len = ctx->ninsns - insns_start;
+				break;
+			}
+
+			emit_imm(RV_REG_T1, off, ctx);
+			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+			insns_start = ctx->ninsns;
+			emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+			insn_len = ctx->ninsns - insns_start;
+			if (insn_is_zext(&insn[1]))
+				return 1;
+			break;
+		case BPF_W:
+			if (is_12b_int(off)) {
+				insns_start = ctx->ninsns;
+				emit(rv_lwu(rd, off, rs), ctx);
+				insn_len = ctx->ninsns - insns_start;
+				break;
+			}
+
+			emit_imm(RV_REG_T1, off, ctx);
+			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+			insns_start = ctx->ninsns;
+			emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+			insn_len = ctx->ninsns - insns_start;
+			if (insn_is_zext(&insn[1]))
+				return 1;
+			break;
+		case BPF_DW:
+			if (is_12b_int(off)) {
+				insns_start = ctx->ninsns;
+				emit_ld(rd, off, rs, ctx);
+				insn_len = ctx->ninsns - insns_start;
+				break;
+			}
+
+			emit_imm(RV_REG_T1, off, ctx);
+			emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+			insns_start = ctx->ninsns;
+			emit_ld(rd, 0, RV_REG_T1, ctx);
+			insn_len = ctx->ninsns - insns_start;
 			break;
 		}

-		emit_imm(RV_REG_T1, off, ctx);
-		emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
-		emit_ld(rd, 0, RV_REG_T1, ctx);
+		ret = add_exception_handler(insn, ctx, rd, insn_len);
+		if (ret)
+			return ret;
 		break;
+	}

 	/* speculation barrier */
 	case BPF_ST | BPF_NOSPEC:
 		break;
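For context, the BPF_PROBE_MEM loads handled above come from programs that dereference BTF pointers directly; a hedged sketch of such a program (the traced function and output are illustrative, not part of this patch):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fentry/do_unlinkat")
	int BPF_PROG(trace_unlinkat, int dfd, struct filename *name)
	{
		/* Direct dereference of a BTF pointer: the verifier marks the
		 * load BPF_PROBE_MEM, and a fault is fixed up by the new
		 * exception table so the destination register reads as zero.
		 */
		const char *p = name->name;

		bpf_printk("unlinkat %s", p);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";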
@@ -11,7 +11,7 @@
 #include "bpf_jit.h"

 /* Number of iterations to try until offsets converge. */
-#define NR_JIT_ITERATIONS	16
+#define NR_JIT_ITERATIONS	32

 static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
 {
@@ -41,12 +41,12 @@ bool bpf_jit_needs_zext(void)

 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
 	struct bpf_prog *tmp, *orig_prog = prog;
 	int pass = 0, prev_ninsns = 0, i;
 	struct rv_jit_data *jit_data;
 	struct rv_jit_context *ctx;
-	unsigned int image_size = 0;

 	if (!prog->jit_requested)
 		return orig_prog;
@@ -73,7 +73,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

 	if (ctx->offset) {
 		extra_pass = true;
-		image_size = sizeof(*ctx->insns) * ctx->ninsns;
+		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
 		goto skip_init_ctx;
 	}

@@ -102,10 +102,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		if (ctx->ninsns == prev_ninsns) {
 			if (jit_data->header)
 				break;
+			/* obtain the actual image size */
+			extable_size = prog->aux->num_exentries *
+				sizeof(struct exception_table_entry);
+			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

-			image_size = sizeof(*ctx->insns) * ctx->ninsns;
 			jit_data->header =
-				bpf_jit_binary_alloc(image_size,
+				bpf_jit_binary_alloc(prog_size + extable_size,
						     &jit_data->image,
						     sizeof(u32),
						     bpf_fill_ill_insns);
@@ -131,9 +134,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out_offset;
 	}

+	if (extable_size)
+		prog->aux->extable = (void *)ctx->insns + prog_size;
+
 skip_init_ctx:
 	pass++;
 	ctx->ninsns = 0;
+	ctx->nexentries = 0;

 	bpf_jit_build_prologue(ctx);
 	if (build_body(ctx, extra_pass, NULL)) {
@@ -144,11 +151,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bpf_jit_build_epilogue(ctx);

 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
+		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

 	prog->bpf_func = (void *)ctx->insns;
 	prog->jited = 1;
-	prog->jited_len = image_size;
+	prog->jited_len = prog_size;

 	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
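The resulting allocation, as a sketch of the layout implied by the code above (not literal code from the patch):

	/*
	 * jit_data->header:
	 *   ctx->insns .. ctx->insns + prog_size        JITed RISC-V code
	 *   ctx->insns + prog_size .. + extable_size    exception table
	 */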
@@ -721,6 +721,20 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
 	*pprog = prog;
 }

+/*
+ * Similar version of maybe_emit_mod() for a single register
+ */
+static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
+{
+	u8 *prog = *pprog;
+
+	if (is64)
+		EMIT1(add_1mod(0x48, reg));
+	else if (is_ereg(reg))
+		EMIT1(add_1mod(0x40, reg));
+	*pprog = prog;
+}
+
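A worked example of what the new helper emits, assuming the standard x86-64 REX encoding (the concrete register is illustrative):

	/* 64-bit neg of BPF r0 (rax): REX.W then F7 /3 */
	maybe_emit_1mod(&prog, dst_reg, true);	/* 0x48 when dst_reg is rax */
	EMIT2(0xF7, add_1reg(0xD8, dst_reg));	/* 48 F7 D8 = neg rax */

For a 32-bit operation on a non-extended register the helper emits nothing, which is exactly the open-coded if/else pattern it replaces throughout the hunks below.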
 /* LDX: dst_reg = *(u8*)(src_reg + off) */
 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 {
@@ -951,10 +965,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			/* neg dst */
 		case BPF_ALU | BPF_NEG:
 		case BPF_ALU64 | BPF_NEG:
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
+					BPF_CLASS(insn->code) == BPF_ALU64);
 			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 			break;

@@ -968,10 +980,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_AND | BPF_K:
 		case BPF_ALU64 | BPF_OR | BPF_K:
 		case BPF_ALU64 | BPF_XOR | BPF_K:
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
+					BPF_CLASS(insn->code) == BPF_ALU64);

 			/*
 			 * b3 holds 'normal' opcode, b2 short form only valid
@@ -1028,19 +1038,30 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_MOD | BPF_X:
 		case BPF_ALU64 | BPF_DIV | BPF_X:
 		case BPF_ALU64 | BPF_MOD | BPF_K:
-		case BPF_ALU64 | BPF_DIV | BPF_K:
-			EMIT1(0x50); /* push rax */
-			EMIT1(0x52); /* push rdx */
+		case BPF_ALU64 | BPF_DIV | BPF_K: {
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

-			if (BPF_SRC(insn->code) == BPF_X)
-				/* mov r11, src_reg */
-				EMIT_mov(AUX_REG, src_reg);
-			else
+			if (dst_reg != BPF_REG_0)
+				EMIT1(0x50); /* push rax */
+			if (dst_reg != BPF_REG_3)
+				EMIT1(0x52); /* push rdx */
+
+			if (BPF_SRC(insn->code) == BPF_X) {
+				if (src_reg == BPF_REG_0 ||
+				    src_reg == BPF_REG_3) {
+					/* mov r11, src_reg */
+					EMIT_mov(AUX_REG, src_reg);
+					src_reg = AUX_REG;
+				}
+			} else {
 				/* mov r11, imm32 */
 				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
+				src_reg = AUX_REG;
+			}

-			/* mov rax, dst_reg */
-			EMIT_mov(BPF_REG_0, dst_reg);
+			if (dst_reg != BPF_REG_0)
+				/* mov rax, dst_reg */
+				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);

 			/*
 			 * xor edx, edx
@@ -1048,33 +1069,30 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			 */
 			EMIT2(0x31, 0xd2);

-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				/* div r11 */
-				EMIT3(0x49, 0xF7, 0xF3);
-			else
-				/* div r11d */
-				EMIT3(0x41, 0xF7, 0xF3);
+			/* div src_reg */
+			maybe_emit_1mod(&prog, src_reg, is64);
+			EMIT2(0xF7, add_1reg(0xF0, src_reg));

-			if (BPF_OP(insn->code) == BPF_MOD)
-				/* mov r11, rdx */
-				EMIT3(0x49, 0x89, 0xD3);
-			else
-				/* mov r11, rax */
-				EMIT3(0x49, 0x89, 0xC3);
+			if (BPF_OP(insn->code) == BPF_MOD &&
+			    dst_reg != BPF_REG_3)
+				/* mov dst_reg, rdx */
+				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
+			else if (BPF_OP(insn->code) == BPF_DIV &&
+				 dst_reg != BPF_REG_0)
+				/* mov dst_reg, rax */
+				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);

-			EMIT1(0x5A); /* pop rdx */
-			EMIT1(0x58); /* pop rax */
-
-			/* mov dst_reg, r11 */
-			EMIT_mov(dst_reg, AUX_REG);
+			if (dst_reg != BPF_REG_3)
+				EMIT1(0x5A); /* pop rdx */
+			if (dst_reg != BPF_REG_0)
+				EMIT1(0x58); /* pop rax */
 			break;
+		}

 		case BPF_ALU | BPF_MUL | BPF_K:
 		case BPF_ALU64 | BPF_MUL | BPF_K:
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_2mod(0x48, dst_reg, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_2mod(0x40, dst_reg, dst_reg));
+			maybe_emit_mod(&prog, dst_reg, dst_reg,
+				       BPF_CLASS(insn->code) == BPF_ALU64);

 			if (is_imm8(imm32))
 				/* imul dst_reg, dst_reg, imm8 */
@@ -1089,10 +1107,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,

 		case BPF_ALU | BPF_MUL | BPF_X:
 		case BPF_ALU64 | BPF_MUL | BPF_X:
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_2mod(0x48, src_reg, dst_reg));
-			else if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT1(add_2mod(0x40, src_reg, dst_reg));
+			maybe_emit_mod(&prog, src_reg, dst_reg,
+				       BPF_CLASS(insn->code) == BPF_ALU64);

 			/* imul dst_reg, src_reg */
 			EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
@@ -1105,10 +1121,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_LSH | BPF_K:
 		case BPF_ALU64 | BPF_RSH | BPF_K:
 		case BPF_ALU64 | BPF_ARSH | BPF_K:
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
+					BPF_CLASS(insn->code) == BPF_ALU64);

 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
 			if (imm32 == 1)
@@ -1139,10 +1153,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			}

 			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
-			if (BPF_CLASS(insn->code) == BPF_ALU64)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
					BPF_CLASS(insn->code) == BPF_ALU64);

 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
 			EMIT2(0xD3, add_1reg(b3, dst_reg));
@@ -1452,10 +1464,8 @@ st: if (is_imm8(insn->off))
 		case BPF_JMP | BPF_JSET | BPF_K:
 		case BPF_JMP32 | BPF_JSET | BPF_K:
 			/* test dst_reg, imm32 */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
+					BPF_CLASS(insn->code) == BPF_JMP);
 			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
 			goto emit_cond_jmp;

@@ -1488,10 +1498,8 @@ st: if (is_imm8(insn->off))
 			}

 			/* cmp dst_reg, imm8/32 */
-			if (BPF_CLASS(insn->code) == BPF_JMP)
-				EMIT1(add_1mod(0x48, dst_reg));
-			else if (is_ereg(dst_reg))
-				EMIT1(add_1mod(0x40, dst_reg));
+			maybe_emit_1mod(&prog, dst_reg,
+					BPF_CLASS(insn->code) == BPF_JMP);

 			if (is_imm8(imm32))
 				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
@@ -168,6 +168,7 @@ struct bpf_map {
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
+	u64 map_extra; /* any per-map-type extra fields */
 	u32 map_flags;
 	int spin_lock_off; /* >=0 valid offset, <0 error */
 	int timer_off; /* >=0 valid offset, <0 error */
@@ -175,15 +176,15 @@ struct bpf_map {
 	int numa_node;
 	u32 btf_key_type_id;
 	u32 btf_value_type_id;
+	u32 btf_vmlinux_value_type_id;
 	struct btf *btf;
 #ifdef CONFIG_MEMCG_KMEM
 	struct mem_cgroup *memcg;
 #endif
 	char name[BPF_OBJ_NAME_LEN];
-	u32 btf_vmlinux_value_type_id;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
-	/* 22 bytes hole */
+	/* 14 bytes hole */

 	/* The 3rd and 4th cacheline with misc members to avoid false sharing
 	 * particularly with refcounting.
@ -513,7 +514,7 @@ struct bpf_verifier_ops {
|
||||
const struct btf_type *t, int off, int size,
|
||||
enum bpf_access_type atype,
|
||||
u32 *next_btf_id);
|
||||
bool (*check_kfunc_call)(u32 kfunc_btf_id);
|
||||
bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
|
||||
};
|
||||
|
||||
struct bpf_prog_offload_ops {
|
||||
@ -877,6 +878,7 @@ struct bpf_prog_aux {
|
||||
void *jit_data; /* JIT specific data. arch dependent */
|
||||
struct bpf_jit_poke_descriptor *poke_tab;
|
||||
struct bpf_kfunc_desc_tab *kfunc_tab;
|
||||
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
|
||||
u32 size_poke_tab;
|
||||
struct bpf_ksym ksym;
|
||||
const struct bpf_prog_ops *ops;
|
||||
@ -886,6 +888,7 @@ struct bpf_prog_aux {
|
||||
struct bpf_prog *prog;
|
||||
struct user_struct *user;
|
||||
u64 load_time; /* ns since boottime */
|
||||
u32 verified_insns;
|
||||
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
|
||||
char name[BPF_OBJ_NAME_LEN];
|
||||
#ifdef CONFIG_SECURITY
|
||||
@ -1000,6 +1003,10 @@ bool bpf_struct_ops_get(const void *kdata);
|
||||
void bpf_struct_ops_put(const void *kdata);
|
||||
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
|
||||
void *value);
|
||||
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
|
||||
struct bpf_prog *prog,
|
||||
const struct btf_func_model *model,
|
||||
void *image, void *image_end);
|
||||
static inline bool bpf_try_module_get(const void *data, struct module *owner)
|
||||
{
|
||||
if (owner == BPF_MODULE_OWNER)
|
||||
@ -1014,6 +1021,22 @@ static inline void bpf_module_put(const void *data, struct module *owner)
|
||||
else
|
||||
module_put(owner);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
/* Define it here to avoid the use of forward declaration */
|
||||
struct bpf_dummy_ops_state {
|
||||
int val;
|
||||
};
|
||||
|
||||
struct bpf_dummy_ops {
|
||||
int (*test_1)(struct bpf_dummy_ops_state *cb);
|
||||
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
|
||||
char a3, unsigned long a4);
|
||||
};
|
||||
|
||||
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
|
||||
union bpf_attr __user *uattr);
|
||||
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
@ -1642,10 +1665,33 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);

static inline bool bpf_tracing_ctx_access(int off, int size,
					  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
					      enum bpf_access_type type,
					      const struct bpf_prog *prog,
					      struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_ctx_access(off, size, type))
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
@ -1863,7 +1909,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}
@ -2094,6 +2141,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
@ -2108,6 +2156,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

@ -125,6 +125,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)

BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

#endif /* _LINUX_BPF_VERIFIER_H */
@ -3,6 +3,7 @@
#ifndef _LINUX_BPFPTR_H
#define _LINUX_BPFPTR_H

#include <linux/mm.h>
#include <linux/sockptr.h>

typedef sockptr_t bpfptr_t;
@ -5,6 +5,7 @@
#define _LINUX_BTF_H 1

#include <linux/types.h>
#include <linux/bpfptr.h>
#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>

@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
}
#endif

struct kfunc_btf_id_set {
	struct list_head list;
	struct btf_id_set *set;
	struct module *owner;
};

struct kfunc_btf_id_list;

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
			       struct kfunc_btf_id_set *s);
void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
				 struct kfunc_btf_id_set *s);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
			      struct module *owner);
#else
static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
					     struct kfunc_btf_id_set *s)
{
}
static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
					       struct kfunc_btf_id_set *s)
{
}
static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
					    u32 kfunc_id, struct module *owner)
{
	return false;
}
#endif

#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
	struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
					 THIS_MODULE }

extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
extern struct kfunc_btf_id_list prog_test_kfunc_list;

#endif
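
A hedged sketch of how a module is expected to use this registration API (the kfunc name and set name here are hypothetical, chosen for illustration):

/* Hypothetical module-side usage of the kfunc BTF ID set API above. */
BTF_SET_START(my_kfunc_ids)
BTF_ID(func, bpf_my_module_kfunc)
BTF_SET_END(my_kfunc_ids)

DEFINE_KFUNC_BTF_ID_SET(&my_kfunc_ids, my_kfunc_btf_set);

static int __init my_mod_init(void)
{
	/* allow test_run programs to call bpf_my_module_kfunc() */
	register_kfunc_btf_id_set(&prog_test_kfunc_list, &my_kfunc_btf_set);
	return 0;
}

static void __exit my_mod_exit(void)
{
	unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &my_kfunc_btf_set);
}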

@ -553,9 +553,9 @@ struct bpf_binary_header {
};

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	u64 misses;
	u64_stats_t cnt;
	u64_stats_t nsecs;
	u64_stats_t misses;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

@ -612,13 +612,14 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		struct bpf_prog_stats *stats;
		u64 start = sched_clock();
		unsigned long flags;

		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	} else {
		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
	}
@ -509,8 +509,22 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

/* We only have one bit so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)
#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
@ -93,8 +93,7 @@ __section("__bpf_raw_tp_map") = { \

#define FIRST(x, ...) x

#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \
static inline void bpf_test_buffer_##call(void) \
{ \
/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
@ -103,8 +102,12 @@ static inline void bpf_test_buffer_##call(void) \
 */ \
	FIRST(proto); \
	(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
} \
__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
}

#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
	__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@ -119,9 +122,17 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
	__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
	__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)

#undef DECLARE_TRACE_WRITABLE
#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
	__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
	__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
	__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_TRACE_WRITABLE
#undef DEFINE_EVENT_WRITABLE
#undef __CHECK_WRITABLE_BUF_SIZE
#undef __DEFINE_EVENT
#undef FIRST
@ -906,6 +906,7 @@ enum bpf_map_type {
	BPF_MAP_TYPE_RINGBUF,
	BPF_MAP_TYPE_INODE_STORAGE,
	BPF_MAP_TYPE_TASK_STORAGE,
	BPF_MAP_TYPE_BLOOM_FILTER,
};

/* Note that tracing related programs such as
@ -1274,6 +1275,13 @@ union bpf_attr {
						   * struct stored as the
						   * map value
						   */
		/* Any per-map-type extra fields
		 *
		 * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
		 * number of hash functions (if 0, the bloom filter will default
		 * to using 5 hash functions).
		 */
		__u64	map_extra;
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
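
A minimal userspace sketch of creating such a map, using the raw syscall so no particular libbpf version is assumed (all values illustrative):

/* Illustrative only: a bloom filter map with 8-byte values, sized for
 * ~10000 entries, using 3 hash functions via the low bits of map_extra.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_bloom_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_BLOOM_FILTER;
	attr.key_size = 0;	/* bloom filter maps are keyless */
	attr.value_size = 8;
	attr.max_entries = 10000;
	attr.map_extra = 3;	/* lowest 4 bits: number of hash functions */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}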
@ -4909,6 +4917,27 @@ union bpf_attr {
 *	Return
 *		The number of bytes written to the buffer, or a negative error
 *		in case of failure.
 *
 * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
 *	Description
 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
 *	Description
 *		Get the address of a kernel symbol, returned in *res*. *res* is
 *		set to 0 if the symbol is not found.
 *	Return
 *		On success, zero. On error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-EINVAL** if string *name* is not the same size as *name_sz*.
 *
 *		**-ENOENT** if symbol is not found.
 *
 *		**-EPERM** if caller does not have permission to obtain kernel address.
 */
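
Per the later syscall.c hunk, this helper is wired up for BPF_PROG_TYPE_SYSCALL programs; a hedged usage sketch (the looked-up symbol is arbitrary):

/* Illustrative syscall-program snippet using the helper documented above. */
SEC("syscall")
int dump_sym(void *ctx)
{
	const char name[] = "bpf_prog_put";	/* name_sz must include the NUL */
	__u64 addr;

	if (!bpf_kallsyms_lookup_name(name, sizeof(name), 0, &addr))
		bpf_printk("bpf_prog_put is at 0x%llx", addr);
	return 0;
}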
#define __BPF_FUNC_MAPPER(FN) \
	FN(unspec), \
@ -5089,6 +5118,8 @@ union bpf_attr {
	FN(task_pt_regs), \
	FN(get_branch_snapshot), \
	FN(trace_vprintk), \
	FN(skc_to_unix_sock), \
	FN(kallsyms_lookup_name), \
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@ -5613,6 +5644,7 @@ struct bpf_prog_info {
	__u64 run_time_ns;
	__u64 run_cnt;
	__u64 recursion_misses;
	__u32 verified_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
@ -5630,6 +5662,8 @@ struct bpf_map_info {
	__u32 btf_id;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 :32;	/* alignment pad */
	__u64 map_extra;
} __attribute__((aligned(8)));

struct bpf_btf_info {
@ -43,7 +43,7 @@ struct btf_type {
 * "size" tells the size of the type it is describing.
 *
 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
 * FUNC, FUNC_PROTO, VAR and TAG.
 * FUNC, FUNC_PROTO, VAR and DECL_TAG.
 * "type" is a type_id referring to another type.
 */
	union {
@ -74,7 +74,7 @@ enum {
	BTF_KIND_VAR		= 14,	/* Variable */
	BTF_KIND_DATASEC	= 15,	/* Section */
	BTF_KIND_FLOAT		= 16,	/* Floating point */
	BTF_KIND_TAG		= 17,	/* Tag */
	BTF_KIND_DECL_TAG	= 17,	/* Decl Tag */

	NR_BTF_KINDS,
	BTF_KIND_MAX		= NR_BTF_KINDS - 1,
@ -174,14 +174,14 @@ struct btf_var_secinfo {
	__u32	size;
};

/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe
/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
 * additional information related to the tag applied location.
 * If component_idx == -1, the tag is applied to a struct, union,
 * variable or function. Otherwise, it is applied to a struct/union
 * member or a func argument, and component_idx indicates which member
 * or argument (0 ... vlen-1).
 */
struct btf_tag {
struct btf_decl_tag {
	__s32	component_idx;
};
@ -64,6 +64,7 @@ config BPF_JIT_DEFAULT_ON

config BPF_UNPRIV_DEFAULT_OFF
	bool "Disable unprivileged BPF by default"
	default y
	depends on BPF_SYSCALL
	help
	  Disables unprivileged BPF by default by setting the corresponding
@ -72,6 +73,12 @@ config BPF_UNPRIV_DEFAULT_OFF
	  disable it by setting it to 1 (from which no other transition to
	  0 is possible anymore).

	  Unprivileged BPF could be used to exploit certain potential
	  speculative execution side-channel vulnerabilities on unmitigated
	  affected hardware.

	  If you are unsure how to answer this question, answer Y.

source "kernel/bpf/preload/Kconfig"

config BPF_LSM
@ -7,7 +7,7 @@ endif
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)

obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
204
kernel/bpf/bloom_filter.c
Normal file
@ -0,0 +1,204 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/random.h>

#define BLOOM_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)

struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;
	u32 hash_seed;
	/* If the size of the values in the bloom filter is u32 aligned,
	 * then it is more performant to use jhash2 as the underlying hash
	 * function, else we use jhash. This tracks the number of u32s
	 * in a u32-aligned value size. If the value size is not u32 aligned,
	 * this will be 0.
	 */
	u32 aligned_u32_count;
	u32 nr_hash_funcs;
	unsigned long bitset[];
};

static u32 hash(struct bpf_bloom_filter *bloom, void *value,
		u32 value_size, u32 index)
{
	u32 h;

	if (bloom->aligned_u32_count)
		h = jhash2(value, bloom->aligned_u32_count,
			   bloom->hash_seed + index);
	else
		h = jhash(value, value_size, bloom->hash_seed + index);

	return h & bloom->bitset_mask;
}

static int bloom_map_peek_elem(struct bpf_map *map, void *value)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		if (!test_bit(h, bloom->bitset))
			return -ENOENT;
	}

	return 0;
}

static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	if (flags != BPF_ANY)
		return -EINVAL;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		set_bit(h, bloom->bitset);
	}

	return 0;
}

static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes the
	 * false positive probability is n * k / ln(2) where n is the number of
	 * expected entries in the bloom filter and k is the number of hash
	 * functions. We use 7 / 5 to approximate 1 / ln(2).
	 *
	 * We round this up to the nearest power of two to enable more efficient
	 * hashing using bitmasks. The bitmask will be the bit array size - 1.
	 *
	 * If this overflows a u32, the bit array size will have 2^32 (4
	 * GB) bits.
	 */
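	/* (Worked example, not part of the patch: with max_entries = 10000 and
	 * nr_hash_funcs = 5, the two multiplications below yield
	 * nr_bits = 10000 * 5 / 5 * 7 = 70000, which roundup_pow_of_two()
	 * turns into 2^17 = 131072 bits - a 16 KiB bitset with
	 * bitset_mask = 0x1ffff.)
	 */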
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing the
		 * u32, we use U32_MAX, which will round up to the equivalent
		 * number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}

	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Check whether the value size is u32-aligned */
	if ((attr->value_size & (sizeof(u32) - 1)) == 0)
		bloom->aligned_u32_count =
			attr->value_size / sizeof(u32);

	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_int();

	return &bloom->map;
}

static void bloom_map_free(struct bpf_map *map)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);

	bpf_map_area_free(bloom);
}

static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* The eBPF program should use map_peek_elem instead */
	return ERR_PTR(-EINVAL);
}

static int bloom_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	/* The eBPF program should use map_push_elem instead */
	return -EINVAL;
}

static int bloom_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	/* Bloom filter maps are keyless */
	return btf_type_is_void(key_type) ? 0 : -EINVAL;
}

static int bpf_bloom_map_btf_id;
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_btf_name = "bpf_bloom_filter",
	.map_btf_id = &bpf_bloom_map_btf_id,
};
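
To show how the push/peek ops above surface in a program, here is a hedged BPF-side sketch (the map definition assumes libbpf's map_extra support from this same series; values are illustrative):

/* Illustrative only: querying a bloom filter map from an XDP program. */
struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__type(value, __u32);
	__uint(max_entries, 1000);
	__uint(map_extra, 3);	/* number of hash functions */
} bloom SEC(".maps");

SEC("xdp")
int bloom_check(struct xdp_md *ctx)
{
	__u32 val = 42;

	bpf_map_push_elem(&bloom, &val, BPF_ANY);	/* add to the set */
	if (bpf_map_peek_elem(&bloom, &val) == 0)
		return XDP_PASS;	/* possibly in the set (false positives allowed) */
	return XDP_DROP;		/* definitely not in the set */
}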
@ -93,6 +93,9 @@ const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;
@ -312,6 +315,20 @@ static int check_zero_holes(const struct btf_type *t, void *data)
	return 0;
}

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end)
{
	u32 flags;

	tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
	tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tprogs, NULL);
}

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
@ -323,7 +340,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	void *image, *image_end;
	u32 i;

	if (flags)
@ -363,12 +380,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
	image_end = st_map->image + PAGE_SIZE;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;
		u32 flags;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@ -430,14 +447,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		flags = st_ops->func_models[i].ret_size > 0 ?
			BPF_TRAMP_F_RET_FENTRY_RET : 0;
		err = arch_prepare_bpf_trampoline(NULL, image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i],
						  flags, tprogs, NULL);
		err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
							&st_ops->func_models[i],
							image, image_end);
		if (err < 0)
			goto reset_unlock;

@ -2,6 +2,9 @@
/* internal file - do not include directly */

#ifdef CONFIG_BPF_JIT
#ifdef CONFIG_NET
BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
#endif
#ifdef CONFIG_INET
#include <net/tcp.h>
BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
103
kernel/bpf/btf.c
@ -281,7 +281,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_TAG]		= "TAG",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
};

const char *btf_type_str(const struct btf_type *t)
@ -460,15 +460,15 @@ static bool btf_type_is_datasec(const struct btf_type *t)
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_tag(const struct btf_type *t)
static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_TAG;
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_tag_target(const struct btf_type *t)
static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t);
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
@ -549,7 +549,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_tag(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

@ -576,7 +576,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t)
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_tag(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

@ -630,9 +630,9 @@ static const struct btf_var *btf_type_var(const struct btf_type *t)
	return (const struct btf_var *)(t + 1);
}

static const struct btf_tag *btf_type_tag(const struct btf_type *t)
static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_tag *)(t + 1);
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
@ -3820,11 +3820,11 @@ static const struct btf_kind_operations float_ops = {
	.show = btf_df_show,
};

static s32 btf_tag_check_meta(struct btf_verifier_env *env,
static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	const struct btf_tag *tag;
	const struct btf_decl_tag *tag;
	u32 meta_needed = sizeof(*tag);
	s32 component_idx;
	const char *value;
@ -3852,7 +3852,7 @@ static s32 btf_tag_check_meta(struct btf_verifier_env *env,
		return -EINVAL;
	}

	component_idx = btf_type_tag(t)->component_idx;
	component_idx = btf_type_decl_tag(t)->component_idx;
	if (component_idx < -1) {
		btf_verifier_log_type(env, t, "Invalid component_idx");
		return -EINVAL;
@ -3863,7 +3863,7 @@ static s32 btf_tag_check_meta(struct btf_verifier_env *env,
	return meta_needed;
}

static int btf_tag_resolve(struct btf_verifier_env *env,
static int btf_decl_tag_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
@ -3874,7 +3874,7 @@ static int btf_tag_resolve(struct btf_verifier_env *env,
	u32 vlen;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || !btf_type_is_tag_target(next_type)) {
	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}
@ -3883,9 +3883,9 @@ static int btf_tag_resolve(struct btf_verifier_env *env,
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	component_idx = btf_type_tag(t)->component_idx;
	component_idx = btf_type_decl_tag(t)->component_idx;
	if (component_idx != -1) {
		if (btf_type_is_var(next_type)) {
		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid component_idx");
			return -EINVAL;
		}
@ -3909,18 +3909,18 @@ static int btf_tag_resolve(struct btf_verifier_env *env,
	return 0;
}

static void btf_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
{
	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
			 btf_type_tag(t)->component_idx);
			 btf_type_decl_tag(t)->component_idx);
}

static const struct btf_kind_operations tag_ops = {
	.check_meta = btf_tag_check_meta,
	.resolve = btf_tag_resolve,
static const struct btf_kind_operations decl_tag_ops = {
	.check_meta = btf_decl_tag_check_meta,
	.resolve = btf_decl_tag_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_tag_log,
	.log_details = btf_decl_tag_log,
	.show = btf_df_show,
};

@ -4058,7 +4058,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
	[BTF_KIND_FLOAT] = &float_ops,
	[BTF_KIND_TAG] = &tag_ops,
	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
};

static s32 btf_check_meta(struct btf_verifier_env *env,
@ -4143,7 +4143,7 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
		return !btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

	if (btf_type_is_tag(t))
	if (btf_type_is_decl_tag(t))
		return btf_resolved_type_id(btf, type_id) &&
		       !btf_resolved_type_size(btf, type_id);

@ -6343,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
};

BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)

/* BTF ID set registration API for modules */

struct kfunc_btf_id_list {
	struct list_head list;
	struct mutex mutex;
};

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES

void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
			       struct kfunc_btf_id_set *s)
{
	mutex_lock(&l->mutex);
	list_add(&s->list, &l->list);
	mutex_unlock(&l->mutex);
}
EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set);

void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
				 struct kfunc_btf_id_set *s)
{
	mutex_lock(&l->mutex);
	list_del_init(&s->list);
	mutex_unlock(&l->mutex);
}
EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set);

bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
			      struct module *owner)
{
	struct kfunc_btf_id_set *s;

	if (!owner)
		return false;
	mutex_lock(&klist->mutex);
	list_for_each_entry(s, &klist->list, list) {
		if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
			mutex_unlock(&klist->mutex);
			return true;
		}
	}
	mutex_unlock(&klist->mutex);
	return false;
}

#endif

#define DEFINE_KFUNC_BTF_ID_LIST(name) \
	struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \
					  __MUTEX_INITIALIZER(name.mutex) }; \
	EXPORT_SYMBOL_GPL(name)

DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
@ -32,6 +32,7 @@
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>
@ -2263,6 +2264,9 @@ static void bpf_prog_free_deferred(struct work_struct *work)
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
4
kernel/bpf/preload/.gitignore
vendored
@ -1,4 +1,2 @@
/FEATURE-DUMP.libbpf
/bpf_helper_defs.h
/feature
/libbpf
/bpf_preload_umd
@ -1,21 +1,35 @@
# SPDX-License-Identifier: GPL-2.0

LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_A = $(obj)/libbpf.a
LIBBPF_OUT = $(abspath $(obj))
LIBBPF_OUT = $(abspath $(obj))/libbpf
LIBBPF_A = $(LIBBPF_OUT)/libbpf.a
LIBBPF_DESTDIR = $(LIBBPF_OUT)
LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include

# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
# in tools/scripts/Makefile.include always succeeds when building the kernel
# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
$(LIBBPF_A):
	$(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
$(LIBBPF_A): | $(LIBBPF_OUT)
	$(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ \
		DESTDIR=$(LIBBPF_DESTDIR) prefix= \
		$(LIBBPF_OUT)/libbpf.a install_headers

libbpf_hdrs: $(LIBBPF_A)

.PHONY: libbpf_hdrs

$(LIBBPF_OUT):
	$(call msg,MKDIR,$@)
	$(Q)mkdir -p $@

userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
	-I $(srctree)/tools/lib/ -Wno-unused-result
	-I $(LIBBPF_INCLUDE) -Wno-unused-result

userprogs := bpf_preload_umd

clean-files := $(userprogs) bpf_helper_defs.h FEATURE-DUMP.libbpf staticobjs/ feature/
clean-files := libbpf/

$(obj)/iterators/iterators.o: | libbpf_hdrs

bpf_preload_umd-objs := iterators/iterators.o
bpf_preload_umd-userldlibs := $(LIBBPF_A) -lelf -lz
@ -1,18 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
OUTPUT := .output
abs_out := $(abspath $(OUTPUT))

CLANG ?= clang
LLC ?= llc
LLVM_STRIP ?= llvm-strip

TOOLS_PATH := $(abspath ../../../../tools)
BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool
BPFTOOL_OUTPUT := $(abs_out)/bpftool
DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../../../tools/lib/bpf)
BPFOBJ := $(OUTPUT)/libbpf.a
BPF_INCLUDE := $(OUTPUT)
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../../../tools/lib) \
	-I$(abspath ../../../../tools/include/uapi)

LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf
LIBBPF_OUTPUT := $(abs_out)/libbpf
LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a

INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi
CFLAGS := -g -Wall

abs_out := $(abspath $(OUTPUT))
ifeq ($(V),1)
Q =
msg =
@ -44,14 +52,18 @@ $(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
		 -c $(filter %.c,$^) -o $@ && \
	$(LLVM_STRIP) -g $@

$(OUTPUT):
$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
	$(call msg,MKDIR,$@)
	$(Q)mkdir -p $(OUTPUT)
	$(Q)mkdir -p $@

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
		    OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
		    OUTPUT=$(abspath $(dir $@))/ prefix= \
		    DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers

$(DEFAULT_BPFTOOL):
	$(Q)$(MAKE) $(submake_extras) -C ../../../../tools/bpf/bpftool \
		    prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install
$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
	$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
		    OUTPUT=$(BPFTOOL_OUTPUT)/ \
		    LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \
		    LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/ \
		    prefix= DESTDIR=$(abs_out)/ install-bin
@ -199,7 +199,8 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
@ -238,7 +239,8 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
@ -348,6 +350,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
@ -555,6 +558,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
@ -563,6 +567,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
@ -812,7 +817,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
@ -833,6 +838,10 @@ static int map_create(union bpf_attr *attr)
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;
@ -1082,6 +1091,14 @@ static int map_lookup_elem(union bpf_attr *attr)
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;
@ -1807,8 +1824,14 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_stats *stats)
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;
@ -1821,9 +1844,9 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog,
		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = st->nsecs;
			tcnt = st->cnt;
			tmisses = st->misses;
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
@ -1839,7 +1862,7 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_stats stats;
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
@ -1851,7 +1874,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n",
		   "recursion_misses:\t%llu\n"
		   "verified_insns:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
@ -1859,7 +1883,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses);
		   stats.misses,
		   prog->aux->verified_insns);
}
#endif

@ -3578,7 +3603,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_stats stats;
	struct bpf_prog_kstats stats;
	char __user *uinsns;
	u32 ulen;
	int err;
@ -3628,6 +3653,8 @@ static int bpf_prog_get_info_by_fd(struct file *file,
	info.run_cnt = stats.cnt;
	info.recursion_misses = stats.misses;

	info.verified_insns = prog->aux->verified_insns;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
@ -3874,6 +3901,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
@ -4756,6 +4784,31 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@ -4766,6 +4819,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
@ -545,7 +545,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)

	stats = this_cpu_ptr(prog->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->misses++;
	u64_stats_inc(&stats->misses);
	u64_stats_update_end(&stats->syncp);
}

@ -586,11 +586,13 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
	 * Hence check that 'start' is valid.
	 */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}
@ -1420,12 +1420,12 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)

static bool __reg64_bound_s32(s64 a)
{
	return a > S32_MIN && a < S32_MAX;
	return a >= S32_MIN && a <= S32_MAX;
}

static bool __reg64_bound_u32(u64 a)
{
	return a > U32_MIN && a < U32_MAX;
	return a >= U32_MIN && a <= U32_MAX;
}
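
The strict comparisons were the bug: a 64-bit register known to be exactly S32_MAX (or U32_MAX) failed the old check, so the exact value was never propagated into the 32-bit bounds. In short:

/* Illustration (not in the patch): for a = 0x7fffffff (S32_MAX),
 * old: a > S32_MIN && a < S32_MAX   -> false, 32-bit bounds left unknown
 * new: a >= S32_MIN && a <= S32_MAX -> true, exact bounds propagated
 */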
static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
@ -1640,52 +1640,168 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
	return env->subprog_cnt - 1;
}

#define MAX_KFUNC_DESCS 256
#define MAX_KFUNC_BTFS	256

struct bpf_kfunc_desc {
	struct btf_func_model func_model;
	u32 func_id;
	s32 imm;
	u16 offset;
};

struct bpf_kfunc_btf {
	struct btf *btf;
	struct module *module;
	u16 offset;
};

#define MAX_KFUNC_DESCS 256
struct bpf_kfunc_desc_tab {
	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
	u32 nr_descs;
};

static int kfunc_desc_cmp_by_id(const void *a, const void *b)
struct bpf_kfunc_btf_tab {
	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
	u32 nr_descs;
};

static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{
	const struct bpf_kfunc_desc *d0 = a;
	const struct bpf_kfunc_desc *d1 = b;

	/* func_id is not greater than BTF_MAX_TYPE */
	return d0->func_id - d1->func_id;
	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
}

static int kfunc_btf_cmp_by_off(const void *a, const void *b)
{
	const struct bpf_kfunc_btf *d0 = a;
	const struct bpf_kfunc_btf *d1 = b;

	return d0->offset - d1->offset;
}

static const struct bpf_kfunc_desc *
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id)
find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
{
	struct bpf_kfunc_desc desc = {
		.func_id = func_id,
		.offset = offset,
	};
	struct bpf_kfunc_desc_tab *tab;

	tab = prog->aux->kfunc_tab;
	return bsearch(&desc, tab->descs, tab->nr_descs,
		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id);
		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
}

static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
					 s16 offset, struct module **btf_modp)
{
	struct bpf_kfunc_btf kf_btf = { .offset = offset };
	struct bpf_kfunc_btf_tab *tab;
	struct bpf_kfunc_btf *b;
	struct module *mod;
	struct btf *btf;
	int btf_fd;

	tab = env->prog->aux->kfunc_btf_tab;
	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
	if (!b) {
		if (tab->nr_descs == MAX_KFUNC_BTFS) {
			verbose(env, "too many different module BTFs\n");
			return ERR_PTR(-E2BIG);
		}

		if (bpfptr_is_null(env->fd_array)) {
			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
			return ERR_PTR(-EPROTO);
		}

		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
					    offset * sizeof(btf_fd),
					    sizeof(btf_fd)))
			return ERR_PTR(-EFAULT);

		btf = btf_get_by_fd(btf_fd);
		if (IS_ERR(btf)) {
			verbose(env, "invalid module BTF fd specified\n");
			return btf;
		}

		if (!btf_is_module(btf)) {
			verbose(env, "BTF fd for kfunc is not a module BTF\n");
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		if (!mod) {
			btf_put(btf);
			return ERR_PTR(-ENXIO);
		}

		b = &tab->descs[tab->nr_descs++];
		b->btf = btf;
		b->module = mod;
		b->offset = offset;

		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
		     kfunc_btf_cmp_by_off, NULL);
	}
	if (btf_modp)
		*btf_modp = b->module;
	return b->btf;
}

void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
{
	if (!tab)
		return;

	while (tab->nr_descs--) {
		module_put(tab->descs[tab->nr_descs].module);
		btf_put(tab->descs[tab->nr_descs].btf);
	}
	kfree(tab);
}

static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env,
				       u32 func_id, s16 offset,
				       struct module **btf_modp)
{
	if (offset) {
		if (offset < 0) {
			/* In the future, this can be allowed to increase limit
			 * of fd index into fd_array, interpreted as u16.
			 */
			verbose(env, "negative offset disallowed for kernel module function call\n");
			return ERR_PTR(-EINVAL);
		}

		return __find_kfunc_desc_btf(env, offset, btf_modp);
	}
	return btf_vmlinux ?: ERR_PTR(-ENOENT);
}
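
Summarizing the lookup path above as the instruction encoding it implies:

/* How a BPF_PSEUDO_KFUNC_CALL instruction is decoded by the code above:
 *
 *   insn->imm       - BTF type id of the called kernel function
 *   insn->off == 0  - the id refers to vmlinux BTF
 *   insn->off  > 0  - index into the prog-load fd_array, whose entry is
 *                     an fd referring to a module's BTF
 *
 * (Restated from the code for quick reference, not new semantics.)
 */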
static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
|
||||
{
|
||||
const struct btf_type *func, *func_proto;
|
||||
struct bpf_kfunc_btf_tab *btf_tab;
|
||||
struct bpf_kfunc_desc_tab *tab;
|
||||
struct bpf_prog_aux *prog_aux;
|
||||
struct bpf_kfunc_desc *desc;
|
||||
const char *func_name;
|
||||
struct btf *desc_btf;
|
||||
unsigned long addr;
|
||||
int err;
|
||||
|
||||
prog_aux = env->prog->aux;
|
||||
tab = prog_aux->kfunc_tab;
|
||||
btf_tab = prog_aux->kfunc_btf_tab;
|
||||
if (!tab) {
|
||||
if (!btf_vmlinux) {
|
||||
verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
|
||||
@ -1713,7 +1829,29 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
|
||||
prog_aux->kfunc_tab = tab;
|
||||
}
|
||||
|
||||
if (find_kfunc_desc(env->prog, func_id))
|
||||
/* func_id == 0 is always invalid, but instead of returning an error, be
|
||||
* conservative and wait until the code elimination pass before returning
|
||||
* error, so that invalid calls that get pruned out can be in BPF programs
|
||||
* loaded from userspace. It is also required that offset be untouched
|
||||
* for such calls.
|
||||
*/
|
||||
if (!func_id && !offset)
|
||||
return 0;
|
||||
|
||||
if (!btf_tab && offset) {
|
||||
btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
|
||||
if (!btf_tab)
|
||||
return -ENOMEM;
|
||||
prog_aux->kfunc_btf_tab = btf_tab;
|
||||
}
|
||||
|
||||
desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL);
|
||||
if (IS_ERR(desc_btf)) {
|
||||
verbose(env, "failed to find BTF for kernel function\n");
|
||||
return PTR_ERR(desc_btf);
|
||||
}
|
||||
|
||||
if (find_kfunc_desc(env->prog, func_id, offset))
|
||||
return 0;
|
||||
|
||||
if (tab->nr_descs == MAX_KFUNC_DESCS) {
|
||||
@ -1721,20 +1859,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
func = btf_type_by_id(btf_vmlinux, func_id);
|
||||
func = btf_type_by_id(desc_btf, func_id);
|
||||
if (!func || !btf_type_is_func(func)) {
|
||||
verbose(env, "kernel btf_id %u is not a function\n",
|
||||
func_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
func_proto = btf_type_by_id(btf_vmlinux, func->type);
|
||||
func_proto = btf_type_by_id(desc_btf, func->type);
|
||||
if (!func_proto || !btf_type_is_func_proto(func_proto)) {
|
||||
verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
|
||||
func_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
func_name = btf_name_by_offset(btf_vmlinux, func->name_off);
|
||||
func_name = btf_name_by_offset(desc_btf, func->name_off);
|
||||
addr = kallsyms_lookup_name(func_name);
|
||||
if (!addr) {
|
||||
verbose(env, "cannot find address for kernel function %s\n",
|
||||
@ -1745,12 +1883,13 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
|
||||
desc = &tab->descs[tab->nr_descs++];
|
||||
desc->func_id = func_id;
|
||||
desc->imm = BPF_CALL_IMM(addr);
|
||||
err = btf_distill_func_proto(&env->log, btf_vmlinux,
|
||||
desc->offset = offset;
|
||||
err = btf_distill_func_proto(&env->log, desc_btf,
|
||||
func_proto, func_name,
|
||||
&desc->func_model);
|
||||
if (!err)
|
||||
sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
|
||||
kfunc_desc_cmp_by_id, NULL);
|
||||
kfunc_desc_cmp_by_id_off, NULL);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1829,7 +1968,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
|
||||
} else if (bpf_pseudo_call(insn)) {
|
||||
ret = add_subprog(env, i + insn->imm + 1);
|
||||
} else {
|
||||
ret = add_kfunc_call(env, insn->imm);
|
||||
ret = add_kfunc_call(env, insn->imm, insn->off);
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
@ -2166,12 +2305,17 @@ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
|
||||
static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
|
||||
{
|
||||
const struct btf_type *func;
|
||||
struct btf *desc_btf;
|
||||
|
||||
if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
|
||||
return NULL;
|
||||
|
||||
func = btf_type_by_id(btf_vmlinux, insn->imm);
|
||||
return btf_name_by_offset(btf_vmlinux, func->name_off);
|
||||
desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL);
|
||||
if (IS_ERR(desc_btf))
|
||||
return "<error>";
|
||||
|
||||
func = btf_type_by_id(desc_btf, insn->imm);
|
||||
return btf_name_by_offset(desc_btf, func->name_off);
|
||||
}
|
||||
|
||||
/* For given verifier state backtrack_insn() is called from the last insn to
|
||||
@ -4858,7 +5002,10 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env,
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
|
||||
case BPF_MAP_TYPE_BLOOM_FILTER:
|
||||
if (meta->func_id == BPF_FUNC_map_peek_elem)
|
||||
*arg_type = ARG_PTR_TO_MAP_VALUE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -5433,6 +5580,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
|
||||
func_id != BPF_FUNC_task_storage_delete)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_MAP_TYPE_BLOOM_FILTER:
|
||||
if (func_id != BPF_FUNC_map_peek_elem &&
|
||||
func_id != BPF_FUNC_map_push_elem)
|
||||
goto error;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@ -5500,13 +5652,18 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
|
||||
map->map_type != BPF_MAP_TYPE_SOCKHASH)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_map_peek_elem:
|
||||
case BPF_FUNC_map_pop_elem:
|
||||
case BPF_FUNC_map_push_elem:
|
||||
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
|
||||
map->map_type != BPF_MAP_TYPE_STACK)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_map_peek_elem:
|
||||
case BPF_FUNC_map_push_elem:
|
||||
if (map->map_type != BPF_MAP_TYPE_QUEUE &&
|
||||
map->map_type != BPF_MAP_TYPE_STACK &&
|
||||
map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_sk_storage_get:
|
||||
case BPF_FUNC_sk_storage_delete:
|
||||
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
|
||||
@@ -6530,23 +6687,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
struct bpf_reg_state *regs = cur_regs(env);
const char *func_name, *ptr_type_name;
u32 i, nargs, func_id, ptr_type_id;
struct module *btf_mod = NULL;
const struct btf_param *args;
struct btf *desc_btf;
int err;

/* skip for now, but return error when we find this in fixup_kfunc_call */
if (!insn->imm)
return 0;

desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod);
if (IS_ERR(desc_btf))
return PTR_ERR(desc_btf);

func_id = insn->imm;
func = btf_type_by_id(btf_vmlinux, func_id);
func_name = btf_name_by_offset(btf_vmlinux, func->name_off);
func_proto = btf_type_by_id(btf_vmlinux, func->type);
func = btf_type_by_id(desc_btf, func_id);
func_name = btf_name_by_offset(desc_btf, func->name_off);
func_proto = btf_type_by_id(desc_btf, func->type);

if (!env->ops->check_kfunc_call ||
!env->ops->check_kfunc_call(func_id)) {
!env->ops->check_kfunc_call(func_id, btf_mod)) {
verbose(env, "calling kernel function %s is not allowed\n",
func_name);
return -EACCES;
}

/* Check the arguments */
err = btf_check_kfunc_arg_match(env, btf_vmlinux, func_id, regs);
err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
if (err)
return err;

@@ -6554,15 +6721,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
mark_reg_not_init(env, regs, caller_saved[i]);

/* Check return type */
t = btf_type_skip_modifiers(btf_vmlinux, func_proto->type, NULL);
t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
if (btf_type_is_scalar(t)) {
mark_reg_unknown(env, regs, BPF_REG_0);
mark_btf_func_reg_size(env, BPF_REG_0, t->size);
} else if (btf_type_is_ptr(t)) {
ptr_type = btf_type_skip_modifiers(btf_vmlinux, t->type,
ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
&ptr_type_id);
if (!btf_type_is_struct(ptr_type)) {
ptr_type_name = btf_name_by_offset(btf_vmlinux,
ptr_type_name = btf_name_by_offset(desc_btf,
ptr_type->name_off);
verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
func_name, btf_type_str(ptr_type),
@@ -6570,7 +6737,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].btf = btf_vmlinux;
regs[BPF_REG_0].btf = desc_btf;
regs[BPF_REG_0].type = PTR_TO_BTF_ID;
regs[BPF_REG_0].btf_id = ptr_type_id;
mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
@@ -6581,7 +6748,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
for (i = 0; i < nargs; i++) {
u32 regno = i + 1;

t = btf_type_skip_modifiers(btf_vmlinux, args[i].type, NULL);
t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
if (btf_type_is_ptr(t))
mark_btf_func_reg_size(env, regno, sizeof(void *));
else
@@ -11121,7 +11288,8 @@ static int do_check(struct bpf_verifier_env *env)
env->jmps_processed++;
if (opcode == BPF_CALL) {
if (BPF_SRC(insn->code) != BPF_K ||
insn->off != 0 ||
(insn->src_reg != BPF_PSEUDO_KFUNC_CALL
&& insn->off != 0) ||
(insn->src_reg != BPF_REG_0 &&
insn->src_reg != BPF_PSEUDO_CALL &&
insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
@@ -12477,6 +12645,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
func[i]->jit_requested = 1;
func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
func[i]->aux->linfo = prog->aux->linfo;
func[i]->aux->nr_linfo = prog->aux->nr_linfo;
func[i]->aux->jited_linfo = prog->aux->jited_linfo;
@@ -12662,10 +12831,15 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env,
{
const struct bpf_kfunc_desc *desc;

if (!insn->imm) {
verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
return -EINVAL;
}

/* insn->imm has the btf func_id. Replace it with
* an address (relative to __bpf_base_call).
*/
desc = find_kfunc_desc(env->prog, insn->imm);
desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
if (!desc) {
verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
insn->imm);
@@ -12946,7 +13120,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
insn->imm == BPF_FUNC_map_push_elem ||
insn->imm == BPF_FUNC_map_pop_elem ||
insn->imm == BPF_FUNC_map_peek_elem ||
insn->imm == BPF_FUNC_redirect_map)) {
insn->imm == BPF_FUNC_redirect_map ||
insn->imm == BPF_FUNC_for_each_map_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
@@ -12990,6 +13165,11 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_redirect,
(int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
(int (*)(struct bpf_map *map,
bpf_callback_t callback_fn,
void *callback_ctx,
u64 flags))NULL));

patch_map_ops_generic:
switch (insn->imm) {
@@ -13014,6 +13194,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
case BPF_FUNC_redirect_map:
insn->imm = BPF_CALL_IMM(ops->map_redirect);
continue;
case BPF_FUNC_for_each_map_elem:
insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
continue;
}

goto patch_call_imm;
@@ -13863,6 +14046,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)

env->verification_time = ktime_get_ns() - start_time;
print_verification_stats(env);
env->prog->aux->verified_insns = env->insn_processed;

if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
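These verifier hunks resolve a kfunc callee against the BTF object that actually defines it (desc_btf, looked up per insn->off) instead of assuming vmlinux BTF, which is what allows kernel functions exported by modules to be called. From the program side the declaration shape is unchanged — a sketch, with the kfunc name invented for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical kfunc defined in some module's BTF; with this series the
 * verifier locates it in that module's BTF rather than vmlinux's.
 */
extern int bpf_mymod_do_work(int x) __ksym;

SEC("tc")
int call_mod_kfunc(struct __sk_buff *skb)
{
	return bpf_mymod_do_work(1) > 0 ? 0 : 1;
}

char _license[] SEC("license") = "GPL";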
@@ -1608,6 +1608,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_to_tcp_request_sock_proto;
case BPF_FUNC_skc_to_udp6_sock:
return &bpf_skc_to_udp6_sock_proto;
case BPF_FUNC_skc_to_unix_sock:
return &bpf_skc_to_unix_sock_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_tracing_proto;
case BPF_FUNC_sk_storage_delete:
@@ -1644,13 +1646,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return true;
return bpf_tracing_ctx_access(off, size, type);
}

static bool tracing_prog_is_valid_access(int off, int size,
@@ -1658,13 +1654,7 @@ static bool tracing_prog_is_valid_access(int off, int size,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
if (off % size != 0)
return false;
return btf_ctx_access(off, size, type, prog, info);
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
lib/test_bpf.c
@@ -2134,7 +2134,7 @@ static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
* of the immediate value. This is often the case if the native instruction
* immediate field width is narrower than 32 bits.
*/
static int bpf_fill_ld_imm64(struct bpf_test *self)
static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
{
int block = 64; /* Increase for more tests per MSB position */
int len = 3 + 8 * 63 * block * 2;
@@ -2180,6 +2180,88 @@ static int bpf_fill_ld_imm64(struct bpf_test *self)
return 0;
}

/*
* Test the two-instruction 64-bit immediate load operation for different
* combinations of bytes. Each byte in the 64-bit word is constructed as
* (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
* All patterns (base1, mask1) and (base2, mask2) bytes are tested.
*/
static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
u8 base1, u8 mask1,
u8 base2, u8 mask2)
{
struct bpf_insn *insn;
int len = 3 + 8 * BIT(8);
int pattern, index;
u32 rand = 1;
int i = 0;

insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;

insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

for (pattern = 0; pattern < BIT(8); pattern++) {
u64 imm = 0;

for (index = 0; index < 8; index++) {
int byte;

if (pattern & BIT(index))
byte = (base1 & mask1) | (rand & ~mask1);
else
byte = (base2 & mask2) | (rand & ~mask2);
imm = (imm << 8) | byte;
}

/* Update our LCG */
rand = rand * 1664525 + 1013904223;

/* Perform operation */
i += __bpf_ld_imm64(&insn[i], R1, imm);

/* Load reference */
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);

/* Check result */
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
insn[i++] = BPF_EXIT_INSN();
}

insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insn[i++] = BPF_EXIT_INSN();

self->u.ptr.insns = insn;
self->u.ptr.len = len;
BUG_ON(i != len);

return 0;
}

static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
}

static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
}

static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
}

static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
}
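The comment above __bpf_fill_ld_imm64_bytes() describes how each test immediate is built. A standalone host-side sketch of that construction (same LCG constants; function and variable names are ours, and truncation to 8 bits happens via the uint8_t type):

#include <stdint.h>
#include <stdio.h>

/* Each of the 8 bytes is (base & mask) | (rand & ~mask), picking
 * (base1, mask1) or (base2, mask2) according to one bit of the 8-bit
 * pattern; "rand" is the same deterministic LCG as in the test.
 */
static uint64_t make_imm(uint8_t pattern, uint32_t *rand,
			 uint8_t base1, uint8_t mask1,
			 uint8_t base2, uint8_t mask2)
{
	uint64_t imm = 0;
	int index;

	for (index = 0; index < 8; index++) {
		uint8_t byte;

		if (pattern & (1u << index))
			byte = (base1 & mask1) | (*rand & ~mask1);
		else
			byte = (base2 & mask2) | (*rand & ~mask2);
		imm = (imm << 8) | byte;
	}
	*rand = *rand * 1664525 + 1013904223;	/* advance the LCG */
	return imm;
}

int main(void)
{
	uint32_t rand = 1;

	/* e.g. the "checker" parameters: all-zero vs. all-one bytes */
	for (unsigned int p = 0; p < 4; p++)
		printf("%#018llx\n",
		       (unsigned long long)make_imm(p, &rand, 0, 0xff, 0xff, 0xff));
	return 0;
}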
/*
* Exhaustive tests of JMP operations for all combinations of power-of-two
* magnitudes of the operands, both for positive and negative values. The
@@ -12401,14 +12483,46 @@ static struct bpf_test tests[] = {
.fill_helper = bpf_fill_alu32_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
/* LD_IMM64 immediate magnitudes */
/* LD_IMM64 immediate magnitudes and byte patterns */
{
"LD_IMM64: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64,
.fill_helper = bpf_fill_ld_imm64_magn,
},
{
"LD_IMM64: checker byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_checker,
},
{
"LD_IMM64: random positive and zero byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_pos_zero,
},
{
"LD_IMM64: random negative and zero byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_neg_zero,
},
{
"LD_IMM64: random positive and negative byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_pos_neg,
},
/* 64-bit ATOMIC register combinations */
{
@@ -14202,72 +14316,9 @@ module_param_string(test_name, test_name, sizeof(test_name), 0);
static int test_id = -1;
module_param(test_id, int, 0);

static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
static int test_range[2] = { 0, INT_MAX };
module_param_array(test_range, int, NULL, 0);

static __init int find_test_index(const char *test_name)
{
int i;

for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!strcmp(tests[i].descr, test_name))
return i;
}
return -1;
}

static __init int prepare_bpf_tests(void)
{
if (test_id >= 0) {
/*
* if a test_id was specified, use test_range to
* cover only that test.
*/
if (test_id >= ARRAY_SIZE(tests)) {
pr_err("test_bpf: invalid test_id specified.\n");
return -EINVAL;
}

test_range[0] = test_id;
test_range[1] = test_id;
} else if (*test_name) {
/*
* if a test_name was specified, find it and setup
* test_range to cover only that test.
*/
int idx = find_test_index(test_name);

if (idx < 0) {
pr_err("test_bpf: no test named '%s' found.\n",
test_name);
return -EINVAL;
}
test_range[0] = idx;
test_range[1] = idx;
} else {
/*
* check that the supplied test_range is valid.
*/
if (test_range[0] >= ARRAY_SIZE(tests) ||
test_range[1] >= ARRAY_SIZE(tests) ||
test_range[0] < 0 || test_range[1] < 0) {
pr_err("test_bpf: test_range is out of bound.\n");
return -EINVAL;
}

if (test_range[1] < test_range[0]) {
pr_err("test_bpf: test_range is ending before it starts.\n");
return -EINVAL;
}
}

return 0;
}

static __init void destroy_bpf_tests(void)
{
}

static bool exclude_test(int test_id)
{
return test_id < test_range[0] || test_id > test_range[1];
@@ -14439,6 +14490,10 @@ static __init int test_skb_segment(void)
for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
const struct skb_segment_test *test = &skb_segment_tests[i];

cond_resched();
if (exclude_test(i))
continue;

pr_info("#%d %s ", i, test->descr);

if (test_skb_segment_single(test)) {
@@ -14820,6 +14875,8 @@ static __init int test_tail_calls(struct bpf_array *progs)
int ret;

cond_resched();
if (exclude_test(i))
continue;

pr_info("#%d %s ", i, test->descr);
if (!fp) {
@@ -14852,29 +14909,144 @@ static __init int test_tail_calls(struct bpf_array *progs)
return err_cnt ? -EINVAL : 0;
}

static char test_suite[32];
module_param_string(test_suite, test_suite, sizeof(test_suite), 0);

static __init int find_test_index(const char *test_name)
{
int i;

if (!strcmp(test_suite, "test_bpf")) {
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!strcmp(tests[i].descr, test_name))
return i;
}
}

if (!strcmp(test_suite, "test_tail_calls")) {
for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
if (!strcmp(tail_call_tests[i].descr, test_name))
return i;
}
}

if (!strcmp(test_suite, "test_skb_segment")) {
for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
if (!strcmp(skb_segment_tests[i].descr, test_name))
return i;
}
}

return -1;
}

static __init int prepare_test_range(void)
{
int valid_range;

if (!strcmp(test_suite, "test_bpf"))
valid_range = ARRAY_SIZE(tests);
else if (!strcmp(test_suite, "test_tail_calls"))
valid_range = ARRAY_SIZE(tail_call_tests);
else if (!strcmp(test_suite, "test_skb_segment"))
valid_range = ARRAY_SIZE(skb_segment_tests);
else
return 0;

if (test_id >= 0) {
/*
* if a test_id was specified, use test_range to
* cover only that test.
*/
if (test_id >= valid_range) {
pr_err("test_bpf: invalid test_id specified for '%s' suite.\n",
test_suite);
return -EINVAL;
}

test_range[0] = test_id;
test_range[1] = test_id;
} else if (*test_name) {
/*
* if a test_name was specified, find it and setup
* test_range to cover only that test.
*/
int idx = find_test_index(test_name);

if (idx < 0) {
pr_err("test_bpf: no test named '%s' found for '%s' suite.\n",
test_name, test_suite);
return -EINVAL;
}
test_range[0] = idx;
test_range[1] = idx;
} else if (test_range[0] != 0 || test_range[1] != INT_MAX) {
/*
* check that the supplied test_range is valid.
*/
if (test_range[0] < 0 || test_range[1] >= valid_range) {
pr_err("test_bpf: test_range is out of bound for '%s' suite.\n",
test_suite);
return -EINVAL;
}

if (test_range[1] < test_range[0]) {
pr_err("test_bpf: test_range is ending before it starts.\n");
return -EINVAL;
}
}

return 0;
}

static int __init test_bpf_init(void)
{
struct bpf_array *progs = NULL;
int ret;

ret = prepare_bpf_tests();
if (strlen(test_suite) &&
strcmp(test_suite, "test_bpf") &&
strcmp(test_suite, "test_tail_calls") &&
strcmp(test_suite, "test_skb_segment")) {
pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite);
return -EINVAL;
}

/*
* if test_suite is not specified, but test_id, test_name or test_range
* is specified, set 'test_bpf' as the default test suite.
*/
if (!strlen(test_suite) &&
(test_id != -1 || strlen(test_name) ||
(test_range[0] != 0 || test_range[1] != INT_MAX))) {
pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n");
strscpy(test_suite, "test_bpf", sizeof(test_suite));
}

ret = prepare_test_range();
if (ret < 0)
return ret;

ret = test_bpf();
destroy_bpf_tests();
if (ret)
return ret;
if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) {
ret = test_bpf();
if (ret)
return ret;
}

ret = prepare_tail_call_tests(&progs);
if (ret)
return ret;
ret = test_tail_calls(progs);
destroy_tail_call_tests(progs);
if (ret)
return ret;
if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) {
ret = prepare_tail_call_tests(&progs);
if (ret)
return ret;
ret = test_tail_calls(progs);
destroy_tail_call_tests(progs);
if (ret)
return ret;
}

return test_skb_segment();
if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment"))
return test_skb_segment();

return 0;
}

static void __exit test_bpf_exit(void)
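Usage note (illustrative invocations, not from the patch): with the reworked parameters above, a single suite or a slice of one can be selected at load time, e.g. `insmod test_bpf.ko test_suite=test_tail_calls` or `insmod test_bpf.ko test_range=0,15`; per the init logic shown, specifying only a test_id, test_name, or test_range now implicitly selects the `test_bpf` suite.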
@@ -1,2 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BPF_SYSCALL) := test_run.o
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_dummy_struct_ops.o
endif
net/bpf/bpf_dummy_struct_ops.c (new file)
@@ -0,0 +1,200 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021. Huawei Technologies Co., Ltd
*/
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

struct bpf_dummy_ops_test_args {
u64 args[MAX_BPF_FUNC_ARGS];
struct bpf_dummy_ops_state state;
};

static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
__u32 size_in;
struct bpf_dummy_ops_test_args *args;
void __user *ctx_in;
void __user *u_state;

size_in = kattr->test.ctx_size_in;
if (size_in != sizeof(u64) * nr)
return ERR_PTR(-EINVAL);

args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args)
return ERR_PTR(-ENOMEM);

ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
if (copy_from_user(args->args, ctx_in, size_in))
goto out;

/* args[0] is 0 means state argument of test_N will be NULL */
u_state = u64_to_user_ptr(args->args[0]);
if (u_state && copy_from_user(&args->state, u_state,
sizeof(args->state)))
goto out;

return args;
out:
kfree(args);
return ERR_PTR(-EFAULT);
}

static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
void __user *u_state;

u_state = u64_to_user_ptr(args->args[0]);
if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
return -EFAULT;

return 0;
}

static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
dummy_ops_test_ret_fn test = (void *)image;
struct bpf_dummy_ops_state *state = NULL;

/* state needs to be NULL if args[0] is 0 */
if (args->args[0])
state = &args->state;
return test(state, args->args[1], args->args[2],
args->args[3], args->args[4]);
}

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
const struct btf_type *func_proto;
struct bpf_dummy_ops_test_args *args;
struct bpf_tramp_progs *tprogs;
void *image = NULL;
unsigned int op_idx;
int prog_ret;
int err;

if (prog->aux->attach_btf_id != st_ops->type_id)
return -EOPNOTSUPP;

func_proto = prog->aux->attach_func_proto;
args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
if (IS_ERR(args))
return PTR_ERR(args);

tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
if (!tprogs) {
err = -ENOMEM;
goto out;
}

image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!image) {
err = -ENOMEM;
goto out;
}
set_vm_flush_reset_perms(image);

op_idx = prog->expected_attach_type;
err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
&st_ops->func_models[op_idx],
image, image + PAGE_SIZE);
if (err < 0)
goto out;

set_memory_ro((long)image, 1);
set_memory_x((long)image, 1);
prog_ret = dummy_ops_call_op(image, args);

err = dummy_ops_copy_args(args);
if (err)
goto out;
if (put_user(prog_ret, &uattr->test.retval))
err = -EFAULT;
out:
kfree(args);
bpf_jit_free_exec(image);
kfree(tprogs);
return err;
}

static int bpf_dummy_init(struct btf *btf)
{
return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct btf *btf,
const struct btf_type *t, int off,
int size, enum bpf_access_type atype,
u32 *next_btf_id)
{
const struct btf_type *state;
s32 type_id;
int err;

type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
BTF_KIND_STRUCT);
if (type_id < 0)
return -EINVAL;

state = btf_type_by_id(btf, type_id);
if (t != state) {
bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
return -EACCES;
}

err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
if (err < 0)
return err;

return atype == BPF_READ ? err : NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
.is_valid_access = bpf_dummy_ops_is_valid_access,
.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

static int bpf_dummy_init_member(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata)
{
return -EOPNOTSUPP;
}

static int bpf_dummy_reg(void *kdata)
{
return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.init_member = bpf_dummy_init_member,
.reg = bpf_dummy_reg,
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
};
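From user space, a dummy struct_ops program is exercised via BPF_PROG_TEST_RUN with a ctx of one u64 per test_N argument, args[0] optionally pointing at a bpf_dummy_ops_state — the layout dummy_ops_init_args() above expects. A hedged sketch; the libbpf call shape (bpf_prog_test_run_opts) is the modern entry point and redefining the state struct here is purely for illustration:

#include <stdio.h>
#include <bpf/bpf.h>

/* Mirrors the kernel-side struct; a real caller would share a header. */
struct bpf_dummy_ops_state {
	int val;
};

int run_dummy_st_ops(int prog_fd)
{
	struct bpf_dummy_ops_state state = { .val = 0 };
	__u64 args[1] = { (__u64)(unsigned long)&state };
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	if (err)
		return err;
	/* retval is the program's return; state.val was copied back */
	printf("retval=%d state.val=%d\n", (int)opts.retval, state.val);
	return 0;
}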
@@ -2,6 +2,7 @@
/* Copyright (c) 2017 Facebook
*/
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -241,9 +242,11 @@ BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
{
return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id))
return true;
return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
@@ -355,13 +358,9 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
return -EINVAL;

if (ctx_size_in) {
info.ctx = kzalloc(ctx_size_in, GFP_USER);
if (!info.ctx)
return -ENOMEM;
if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
err = -EFAULT;
goto out;
}
info.ctx = memdup_user(ctx_in, ctx_size_in);
if (IS_ERR(info.ctx))
return PTR_ERR(info.ctx);
} else {
info.ctx = NULL;
}
@@ -389,7 +388,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
err = -EFAULT;

out:
kfree(info.ctx);
return err;
}
@@ -1049,13 +1047,9 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
return -EINVAL;

if (ctx_size_in) {
ctx = kzalloc(ctx_size_in, GFP_USER);
if (!ctx)
return -ENOMEM;
if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
err = -EFAULT;
goto out;
}
ctx = memdup_user(ctx_in, ctx_size_in);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
}

rcu_read_lock_trace();
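Both hunks above collapse the open-coded allocate-then-copy sequence into memdup_user(), which returns either a kernel copy of the user buffer or an ERR_PTR, removing the manual unwind path. A minimal illustration of the resulting caller shape (the wrapper name is ours):

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* memdup_user() folds kzalloc() + copy_from_user() + error unwind into
 * one call; on failure it returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT).
 */
static void *copy_ctx_in(const void __user *ctx_in, __u32 size_in)
{
	if (!size_in)
		return NULL;
	return memdup_user(ctx_in, size_in);
}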
@@ -10723,6 +10723,26 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};

BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk)
{
/* unix_sock type is not generated in dwarf and hence btf,
* trigger an explicit type generation here.
*/
BTF_TYPE_EMIT(struct unix_sock);
if (sk && sk_fullsock(sk) && sk->sk_family == AF_UNIX)
return (unsigned long)sk;

return (unsigned long)NULL;
}

const struct bpf_func_proto bpf_skc_to_unix_sock_proto = {
.func = bpf_skc_to_unix_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX],
};

BPF_CALL_1(bpf_sock_from_file, struct file *, file)
{
return (unsigned long)sock_from_file(file);
@@ -10762,6 +10782,9 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skc_to_udp6_sock:
func = &bpf_skc_to_udp6_sock_proto;
break;
case BPF_FUNC_skc_to_unix_sock:
func = &bpf_skc_to_unix_sock_proto;
break;
default:
return bpf_base_func_proto(func_id);
}
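A sketch of the new cast helper from a program's perspective — the attach point and printout are illustrative only. Since the helper returns NULL unless the socket really is AF_UNIX, the NULL check is mandatory:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("fentry/unix_listen")
int BPF_PROG(trace_unix_listen, struct socket *sock, int backlog)
{
	struct unix_sock *us = bpf_skc_to_unix_sock(sock->sk);

	if (!us)	/* not an AF_UNIX socket */
		return 0;
	bpf_printk("unix listen, backlog %d", backlog);
	return 0;
}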
@@ -508,6 +508,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
u32 off, u32 len,
struct sk_psock *psock,
struct sock *sk,
struct sk_msg *msg)
@@ -521,11 +522,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
*/
if (skb_linearize(skb))
return -EAGAIN;
num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
if (unlikely(num_sge < 0))
return num_sge;

copied = skb->len;
copied = len;
msg->sg.start = 0;
msg->sg.size = copied;
msg->sg.end = num_sge;
@@ -536,9 +537,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len)
{
struct sock *sk = psock->sk;
struct sk_msg *msg;
@@ -549,7 +552,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
* correctly.
*/
if (unlikely(skb->sk == sk))
return sk_psock_skb_ingress_self(psock, skb);
return sk_psock_skb_ingress_self(psock, skb, off, len);
msg = sk_psock_create_ingress_msg(sk, skb);
if (!msg)
return -EAGAIN;
@@ -561,7 +564,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
* into user buffers.
*/
skb_set_owner_r(skb, sk);
err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
if (err < 0)
kfree(msg);
return err;
@@ -571,7 +574,8 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
* skb. In this case we do not need to check memory limits or skb_set_owner_r
* because the skb is already accounted for here.
*/
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len)
{
struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
struct sock *sk = psock->sk;
@@ -581,7 +585,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
return -EAGAIN;
sk_msg_init(msg);
skb_set_owner_r(skb, sk);
err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
if (err < 0)
kfree(msg);
return err;
@@ -595,7 +599,7 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
return -EAGAIN;
return skb_send_sock(psock->sk, skb, off, len);
}
return sk_psock_skb_ingress(psock, skb);
return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
@@ -638,6 +642,12 @@ static void sk_psock_backlog(struct work_struct *work)
while ((skb = skb_dequeue(&psock->ingress_skb))) {
len = skb->len;
off = 0;
if (skb_bpf_strparser(skb)) {
struct strp_msg *stm = strp_msg(skb);

off = stm->offset;
len = stm->full_len;
}
start:
ingress = skb_bpf_ingress(skb);
skb_bpf_redirect_clear(skb);
@@ -877,6 +887,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
* return code, but then didn't set a redirect interface.
*/
if (unlikely(!sk_other)) {
skb_bpf_redirect_clear(skb);
sock_drop(from->sk, skb);
return -EIO;
}
@@ -944,6 +955,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
{
struct sock *sk_other;
int err = 0;
u32 len, off;

switch (verdict) {
case __SK_PASS:
@@ -951,6 +963,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
sk_other = psock->sk;
if (sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_bpf_redirect_clear(skb);
goto out_free;
}

@@ -963,7 +976,15 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
* retrying later from workqueue.
*/
if (skb_queue_empty(&psock->ingress_skb)) {
err = sk_psock_skb_ingress_self(psock, skb);
len = skb->len;
off = 0;
if (skb_bpf_strparser(skb)) {
struct strp_msg *stm = strp_msg(skb);

off = stm->offset;
len = stm->full_len;
}
err = sk_psock_skb_ingress_self(psock, skb, off, len);
}
if (err < 0) {
spin_lock_bh(&psock->ingress_lock);
@@ -1029,6 +1050,8 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
if (ret == SK_PASS)
skb_bpf_set_strparser(skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
|
||||
const struct bpf_prog *prog,
|
||||
struct bpf_insn_access_aux *info)
|
||||
{
|
||||
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
|
||||
return false;
|
||||
if (type != BPF_READ)
|
||||
return false;
|
||||
if (off % size != 0)
|
||||
return false;
|
||||
|
||||
if (!btf_ctx_access(off, size, type, prog, info))
|
||||
if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
|
||||
return false;
|
||||
|
||||
if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
|
||||
@ -223,41 +216,13 @@ BTF_ID(func, tcp_reno_cong_avoid)
|
||||
BTF_ID(func, tcp_reno_undo_cwnd)
|
||||
BTF_ID(func, tcp_slow_start)
|
||||
BTF_ID(func, tcp_cong_avoid_ai)
|
||||
#ifdef CONFIG_X86
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
|
||||
BTF_ID(func, cubictcp_init)
|
||||
BTF_ID(func, cubictcp_recalc_ssthresh)
|
||||
BTF_ID(func, cubictcp_cong_avoid)
|
||||
BTF_ID(func, cubictcp_state)
|
||||
BTF_ID(func, cubictcp_cwnd_event)
|
||||
BTF_ID(func, cubictcp_acked)
|
||||
#endif
|
||||
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
|
||||
BTF_ID(func, dctcp_init)
|
||||
BTF_ID(func, dctcp_update_alpha)
|
||||
BTF_ID(func, dctcp_cwnd_event)
|
||||
BTF_ID(func, dctcp_ssthresh)
|
||||
BTF_ID(func, dctcp_cwnd_undo)
|
||||
BTF_ID(func, dctcp_state)
|
||||
#endif
|
||||
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
|
||||
BTF_ID(func, bbr_init)
|
||||
BTF_ID(func, bbr_main)
|
||||
BTF_ID(func, bbr_sndbuf_expand)
|
||||
BTF_ID(func, bbr_undo_cwnd)
|
||||
BTF_ID(func, bbr_cwnd_event)
|
||||
BTF_ID(func, bbr_ssthresh)
|
||||
BTF_ID(func, bbr_min_tso_segs)
|
||||
BTF_ID(func, bbr_set_state)
|
||||
#endif
|
||||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
#endif /* CONFIG_X86 */
|
||||
BTF_SET_END(bpf_tcp_ca_kfunc_ids)
|
||||
|
||||
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
|
||||
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner)
|
||||
{
|
||||
return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
|
||||
if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id))
|
||||
return true;
|
||||
return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner);
|
||||
}
|
||||
|
||||
static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
|
||||
|
@@ -56,6 +56,8 @@
* otherwise TCP stack falls back to an internal pacing using one high
* resolution timer per TCP socket and may use more resources.
*/
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
@@ -1152,14 +1154,38 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state,
};

BTF_SET_START(tcp_bbr_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif
BTF_SET_END(tcp_bbr_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&tcp_bbr_kfunc_ids, tcp_bbr_kfunc_btf_set);

static int __init bbr_register(void)
{
int ret;

BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&tcp_bbr_cong_ops);
ret = tcp_register_congestion_control(&tcp_bbr_cong_ops);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
return 0;
}

static void __exit bbr_unregister(void)
{
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}
@@ -25,6 +25,8 @@
*/

#include <linux/mm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>
@@ -482,8 +484,25 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic",
};

BTF_SET_START(tcp_cubic_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#endif
BTF_SET_END(tcp_cubic_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set);

static int __init cubictcp_register(void)
{
int ret;

BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);

/* Precompute a bunch of the scaling factors that are used per-packet
@@ -514,11 +533,16 @@ static int __init cubictcp_register(void)
/* divide by bic_scale and by constant Srtt (100ms) */
do_div(cube_factor, bic_scale * 10);

return tcp_register_congestion_control(&cubictcp);
ret = tcp_register_congestion_control(&cubictcp);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
return 0;
}

static void __exit cubictcp_unregister(void)
{
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
tcp_unregister_congestion_control(&cubictcp);
}
@@ -36,6 +36,8 @@
* Glenn Judd <glenn.judd@morganstanley.com>
*/

#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <net/tcp.h>
@@ -236,14 +238,36 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno",
};

BTF_SET_START(tcp_dctcp_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#endif
BTF_SET_END(tcp_dctcp_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&tcp_dctcp_kfunc_ids, tcp_dctcp_kfunc_btf_set);

static int __init dctcp_register(void)
{
int ret;

BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&dctcp);
ret = tcp_register_congestion_control(&dctcp);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
return 0;
}

static void __exit dctcp_unregister(void)
{
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
tcp_unregister_congestion_control(&dctcp);
}
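The bbr/cubic/dctcp hunks above all apply the same three-step pattern: declare a BTF ID set for the module's kfuncs, wrap it in a kfunc_btf_id_set, and register it on a per-hook list at module init. Reduced to a skeleton for a hypothetical out-of-tree congestion-control module — bpf_tcp_ca_kfunc_list is the real list these modules join, while the my_ca_* names are placeholders for functions the module would define:

#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>

BTF_SET_START(my_ca_kfunc_ids)
BTF_ID(func, my_ca_init)	/* placeholder kfuncs */
BTF_ID(func, my_ca_cong_avoid)
BTF_SET_END(my_ca_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&my_ca_kfunc_ids, my_ca_kfunc_btf_set);

static int __init my_ca_register(void)
{
	/* ...register the congestion control itself first, then: */
	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &my_ca_kfunc_btf_set);
	return 0;
}

static void __exit my_ca_unregister(void)
{
	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &my_ca_kfunc_btf_set);
}

module_init(my_ca_register);
module_exit(my_ca_unregister);
MODULE_LICENSE("GPL");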
samples/bpf/.gitignore
@@ -57,3 +57,7 @@ testfile.img
hbm_out.log
iperf.*
*.out
*.skel.h
/vmlinux.h
/bpftool/
/libbpf/
@@ -3,6 +3,8 @@
BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools

pound := \#

# List of programs to build
tprogs-y := test_lru_dist
tprogs-y += sock_example
@@ -59,7 +61,11 @@ tprogs-y += xdp_redirect
tprogs-y += xdp_monitor

# Libbpf dependencies
LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
LIBBPF_SRC = $(TOOLS_PATH)/lib/bpf
LIBBPF_OUTPUT = $(abspath $(BPF_SAMPLES_PATH))/libbpf
LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a

CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
@@ -198,7 +204,7 @@ TPROGS_CFLAGS += -Wstrict-prototypes

TPROGS_CFLAGS += -I$(objtree)/usr/include
TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
TPROGS_CFLAGS += -I$(srctree)/tools/lib/
TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
TPROGS_CFLAGS += -I$(srctree)/tools/include
TPROGS_CFLAGS += -I$(srctree)/tools/perf
TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
@@ -223,6 +229,7 @@ CLANG ?= clang
OPT ?= opt
LLVM_DIS ?= llvm-dis
LLVM_OBJCOPY ?= llvm-objcopy
LLVM_READELF ?= llvm-readelf
BTF_PAHOLE ?= pahole

# Detect that we're cross compiling and use the cross compiler
@@ -232,7 +239,7 @@ endif

# Don't evaluate probes and warnings if we need to run make recursively
ifneq ($(src),)
HDR_PROBE := $(shell printf "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
HDR_PROBE := $(shell printf "$(pound)include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
$(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
-o /dev/null 2>/dev/null && echo okay)

@@ -246,7 +253,7 @@ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
readelf -S ./llvm_btf_verify.o | grep BTF; \
$(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)

BPF_EXTRA_CFLAGS += -fno-stack-protector
@@ -268,16 +275,27 @@ all:
clean:
$(MAKE) -C ../../ M=$(CURDIR) clean
@find $(CURDIR) -type f -name '*~' -delete
@$(RM) -r $(CURDIR)/libbpf $(CURDIR)/bpftool

$(LIBBPF): FORCE
$(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
# Fix up variables inherited from Kbuild that tools/ build system won't like
$(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O=
$(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ \
O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \
$@ install_headers

BPFTOOLDIR := $(TOOLS_PATH)/bpf/bpftool
BPFTOOL := $(BPFTOOLDIR)/bpftool
$(BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile)
$(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../
BPFTOOL_OUTPUT := $(abspath $(BPF_SAMPLES_PATH))/bpftool
BPFTOOL := $(BPFTOOL_OUTPUT)/bpftool
$(BPFTOOL): $(LIBBPF) $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT)
$(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \
OUTPUT=$(BPFTOOL_OUTPUT)/ \
LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \
LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/

$(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
$(call msg,MKDIR,$@)
$(Q)mkdir -p $@

$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
$(call filechk,offsets,__SYSCALL_NRS_H__)
@@ -309,6 +327,11 @@ verify_target_bpf: verify_cmds
$(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF)
$(src)/*.c: verify_target_bpf $(LIBBPF)

libbpf_hdrs: $(LIBBPF)
$(obj)/$(TRACE_HELPERS): | libbpf_hdrs

.PHONY: libbpf_hdrs

$(obj)/xdp_redirect_cpu_user.o: $(obj)/xdp_redirect_cpu.skel.h
$(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h
$(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h
@@ -366,7 +389,7 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/x
$(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(SRCARCH) \
-Wno-compare-distinct-pointer-types -I$(srctree)/include \
-I$(srctree)/samples/bpf -I$(srctree)/tools/include \
-I$(srctree)/tools/lib $(CLANG_SYS_INCLUDES) \
-I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \
-c $(filter %.bpf.c,$^) -o $@

LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \
@@ -403,7 +426,7 @@ $(obj)/%.o: $(src)/%.c
@echo " CLANG-bpf " $@
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
-I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
-I$(srctree)/tools/lib/ \
-I$(LIBBPF_INCLUDE) \
-D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
@@ -128,7 +128,7 @@ int main(int argc, char **argv)
if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
return 1;

map = bpf_map__next(NULL, obj);
map = bpf_object__next_map(obj, NULL);
if (!map) {
printf("finding a map in obj file failed\n");
return 1;
@@ -325,7 +325,6 @@ int main(int argc, char **argv)
int add_cpu = -1;
int ifindex = -1;
int *cpu, i, opt;
char *ifname;
__u32 qsize;
int n_cpus;

@@ -393,9 +392,8 @@ int main(int argc, char **argv)
fprintf(stderr, "-d/--dev name too long\n");
goto end_cpu;
}
ifname = (char *)&ifname_buf;
safe_strncpy(ifname, optarg, sizeof(ifname));
ifindex = if_nametoindex(ifname);
safe_strncpy(ifname_buf, optarg, strlen(ifname_buf));
ifindex = if_nametoindex(ifname_buf);
if (!ifindex)
ifindex = strtoul(optarg, NULL, 0);
if (!ifindex) {
@@ -154,7 +154,7 @@ int main(int argc, char **argv)
return 1;
}

map = bpf_map__next(NULL, obj);
map = bpf_object__next_map(obj, NULL);
if (!map) {
printf("finding a map in obj file failed\n");
return 1;
@@ -62,9 +62,9 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
#define EXPAND(...) __VA_ARGS__

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#else
#error "Unknown endianness"
@@ -85,10 +85,10 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
#elif __BITS_PER_LONG == 64

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define ENDIAN(_lo, _hi) _lo, _hi
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define ENDIAN(_lo, _hi) _hi, _lo
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#endif
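Why the substitution above matters: __BYTE_ORDER and __BIG_ENDIAN come from the libc's <endian.h> and, if that header was never pulled in, both are undefined and evaluate to 0 in the preprocessor — so the comparison is trivially true and the little-endian branch is taken silently even on big-endian targets. The __BYTE_ORDER__/__ORDER_*_ENDIAN__ forms are predefined by GCC and Clang and need no header. Standalone illustration:

#include <stdio.h>

int main(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	puts("little endian");	/* compiler-provided, always defined */
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	puts("big endian");
#else
#error "Unknown endianness"
#endif
	return 0;
}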
@@ -40,7 +40,8 @@ quiet_cmd_ld_ko_o = LD [M] $@
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
if [ -f vmlinux ]; then \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
else \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi;
@@ -537,6 +537,7 @@ class PrinterHelpers(Printer):
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct unix_sock',
'struct task_struct',

'struct __sk_buff',
@@ -589,6 +590,7 @@ class PrinterHelpers(Printer):
'struct tcp_timewait_sock',
'struct tcp_request_sock',
'struct udp6_sock',
'struct unix_sock',
'struct task_struct',
'struct path',
'struct btf_ptr',
@@ -205,7 +205,6 @@ vmlinux_link()
gen_btf()
{
local pahole_ver
local extra_paholeopt=

if ! [ -x "$(command -v ${PAHOLE})" ]; then
echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
@@ -220,16 +219,8 @@ gen_btf()

vmlinux_link ${1}

if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
# pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars"
fi
if [ "${pahole_ver}" -ge "121" ]; then
extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
fi

info "BTF" ${2}
LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${PAHOLE_FLAGS} ${1}

# Create ${2} which contains just .BTF section but no symbols. Add
# SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
scripts/pahole-flags.sh (new executable file)
@@ -0,0 +1,20 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0

extra_paholeopt=

if ! [ -x "$(command -v ${PAHOLE})" ]; then
return
fi

pahole_ver=$(${PAHOLE} --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/')

if [ "${pahole_ver}" -ge "118" ] && [ "${pahole_ver}" -le "121" ]; then
# pahole 1.18 through 1.21 can't handle zero-sized per-CPU vars
extra_paholeopt="${extra_paholeopt} --skip_encoding_btf_vars"
fi
if [ "${pahole_ver}" -ge "121" ]; then
extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
fi

echo ${extra_paholeopt}
@ -14,33 +14,43 @@ else
|
||||
Q = @
|
||||
endif
|
||||
|
||||
BPF_DIR = $(srctree)/tools/lib/bpf/
|
||||
BPF_DIR = $(srctree)/tools/lib/bpf
|
||||
|
||||
ifneq ($(OUTPUT),)
|
||||
LIBBPF_OUTPUT = $(OUTPUT)/libbpf/
|
||||
LIBBPF_PATH = $(LIBBPF_OUTPUT)
|
||||
BOOTSTRAP_OUTPUT = $(OUTPUT)/bootstrap/
|
||||
_OUTPUT := $(OUTPUT)
|
||||
else
|
||||
LIBBPF_OUTPUT =
|
||||
LIBBPF_PATH = $(BPF_DIR)
|
||||
BOOTSTRAP_OUTPUT = $(CURDIR)/bootstrap/
|
||||
_OUTPUT := $(CURDIR)
|
||||
endif
|
||||
BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/
|
||||
LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/
|
||||
LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
|
||||
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
|
||||
LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
|
||||
|
||||
LIBBPF = $(LIBBPF_PATH)libbpf.a
|
||||
LIBBPF = $(LIBBPF_OUTPUT)libbpf.a
|
||||
LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/
|
||||
LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
|
||||
|
||||
# We need to copy hashmap.h and nlattr.h which is not otherwise exported by
|
||||
# libbpf, but still required by bpftool.
|
||||
LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h)
|
||||
|
||||
ifeq ($(BPFTOOL_VERSION),)
|
||||
BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
|
||||
endif

$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT):
$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR):
$(QUIET_MKDIR)mkdir -p $@

$(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a
$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \
DESTDIR=$(LIBBPF_DESTDIR) prefix= $(LIBBPF) install_headers

$(LIBBPF_BOOTSTRAP): FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR)
$(call QUIET_INSTALL, $@)
$(Q)install -m 644 -t $(LIBBPF_HDRS_DIR) $<

$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@

@ -60,11 +70,10 @@ CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
-I$(if $(OUTPUT),$(OUTPUT),.) \
-I$(LIBBPF_INCLUDE) \
-I$(srctree)/kernel/bpf/ \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(srctree)/tools/lib \
-I$(srctree)/tools/perf
-I$(srctree)/tools/include/uapi
CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
ifneq ($(EXTRA_CFLAGS),)
CFLAGS += $(EXTRA_CFLAGS)
@ -140,7 +149,7 @@ BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o g
$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)

OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
$(OBJS): $(LIBBPF)
$(OBJS): $(LIBBPF) $(LIBBPF_INTERNAL_HDRS)

VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
@ -167,8 +176,7 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF)
$(QUIET_CLANG)$(CLANG) \
-I$(if $(OUTPUT),$(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
-I$(LIBBPF_PATH) \
-I$(srctree)/tools/lib \
-I$(LIBBPF_INCLUDE) \
-g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@

$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
@ -189,7 +197,10 @@ $(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<

$(OUTPUT)feature.o: | zdep
$(OUTPUT)feature.o:
ifneq ($(feature-zlib), 1)
$(error "No zlib found")
endif

$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \
@ -198,7 +209,7 @@ $(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)

$(BOOTSTRAP_OUTPUT)%.o: %.c | $(BOOTSTRAP_OUTPUT)
$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<

$(OUTPUT)%.o: %.c
@ -217,10 +228,12 @@ clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
$(Q)$(RM) -r -- $(OUTPUT)feature/

install: $(OUTPUT)bpftool
install-bin: $(OUTPUT)bpftool
$(call QUIET_INSTALL, bpftool)
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
$(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool

install: install-bin
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
$(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)

@ -243,10 +256,7 @@ doc-uninstall:

FORCE:

zdep:
@if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi

.SECONDARY:
.PHONY: all FORCE clean install uninstall zdep
.PHONY: all FORCE clean install-bin install uninstall
.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all

@ -8,14 +8,15 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/hashtable.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "json_writer.h"
#include "main.h"

@ -37,17 +38,12 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_TAG] = "TAG",
};

struct btf_attach_table {
DECLARE_HASHTABLE(table, 16);
[BTF_KIND_DECL_TAG] = "DECL_TAG",
};

struct btf_attach_point {
__u32 obj_id;
__u32 btf_id;
struct hlist_node hash;
};

static const char *btf_int_enc_str(__u8 encoding)
@ -329,7 +325,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
printf("\n\ttype_id=%u offset=%u size=%u",
v->type, v->offset, v->size);

if (v->type <= btf__get_nr_types(btf)) {
if (v->type < btf__type_cnt(btf)) {
vt = btf__type_by_id(btf, v->type);
printf(" (%s '%s')",
btf_kind_str[btf_kind_safe(btf_kind(vt))],
@ -348,8 +344,8 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
printf(" size=%u", t->size);
break;
}
case BTF_KIND_TAG: {
const struct btf_tag *tag = (const void *)(t + 1);
case BTF_KIND_DECL_TAG: {
const struct btf_decl_tag *tag = (const void *)(t + 1);

if (json_output) {
jsonw_uint_field(w, "type_id", t->type);
@ -390,14 +386,14 @@ static int dump_btf_raw(const struct btf *btf,
}
} else {
const struct btf *base;
int cnt = btf__get_nr_types(btf);
int cnt = btf__type_cnt(btf);
int start_id = 1;

base = btf__base_btf(btf);
if (base)
start_id = btf__get_nr_types(base) + 1;
start_id = btf__type_cnt(base);

for (i = start_id; i <= cnt; i++) {
for (i = start_id; i < cnt; i++) {
t = btf__type_by_id(btf, i);
dump_btf_type(btf, i, t);
}
@ -440,9 +436,9 @@ static int dump_btf_c(const struct btf *btf,
goto done;
}
} else {
int cnt = btf__get_nr_types(btf);
int cnt = btf__type_cnt(btf);

for (i = 1; i <= cnt; i++) {
for (i = 1; i < cnt; i++) {
err = btf_dump__dump_type(d, i);
if (err)
goto done;
@ -645,21 +641,8 @@ static int btf_parse_fd(int *argc, char ***argv)
return fd;
}

static void delete_btf_table(struct btf_attach_table *tab)
{
struct btf_attach_point *obj;
struct hlist_node *tmp;

unsigned int bkt;

hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
hash_del(&obj->hash);
free(obj);
}
}

static int
build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
void *info, __u32 *len)
{
static const char * const names[] = {
@ -667,7 +650,6 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
};
struct btf_attach_point *obj_node;
__u32 btf_id, id = 0;
int err;
int fd;
@ -741,28 +723,25 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
if (!btf_id)
continue;

obj_node = calloc(1, sizeof(*obj_node));
if (!obj_node) {
p_err("failed to allocate memory: %s", strerror(errno));
err = -ENOMEM;
err = hashmap__append(tab, u32_as_hash_field(btf_id),
u32_as_hash_field(id));
if (err) {
p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
btf_id, id, strerror(errno));
goto err_free;
}

obj_node->obj_id = id;
obj_node->btf_id = btf_id;
hash_add(tab->table, &obj_node->hash, obj_node->btf_id);
}

return 0;

err_free:
delete_btf_table(tab);
hashmap__free(tab);
return err;
}

static int
build_btf_tables(struct btf_attach_table *btf_prog_table,
struct btf_attach_table *btf_map_table)
build_btf_tables(struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct bpf_prog_info prog_info;
__u32 prog_len = sizeof(prog_info);
@ -778,7 +757,7 @@ build_btf_tables(struct btf_attach_table *btf_prog_table,
err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info,
&map_len);
if (err) {
delete_btf_table(btf_prog_table);
hashmap__free(btf_prog_table);
return err;
}

@ -787,10 +766,10 @@ build_btf_tables(struct btf_attach_table *btf_prog_table,

static void
show_btf_plain(struct bpf_btf_info *info, int fd,
struct btf_attach_table *btf_prog_table,
struct btf_attach_table *btf_map_table)
struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct btf_attach_point *obj;
struct hashmap_entry *entry;
const char *name = u64_to_ptr(info->name);
int n;

@ -804,29 +783,30 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
printf("size %uB", info->btf_size);

n = 0;
hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) {
if (obj->btf_id == info->id)
printf("%s%u", n++ == 0 ? " prog_ids " : ",",
obj->obj_id);
hashmap__for_each_key_entry(btf_prog_table, entry,
u32_as_hash_field(info->id)) {
printf("%s%u", n++ == 0 ? " prog_ids " : ",",
hash_field_as_u32(entry->value));
}

n = 0;
hash_for_each_possible(btf_map_table->table, obj, hash, info->id) {
if (obj->btf_id == info->id)
printf("%s%u", n++ == 0 ? " map_ids " : ",",
obj->obj_id);
hashmap__for_each_key_entry(btf_map_table, entry,
u32_as_hash_field(info->id)) {
printf("%s%u", n++ == 0 ? " map_ids " : ",",
hash_field_as_u32(entry->value));
}
emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");

emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

printf("\n");
}

static void
show_btf_json(struct bpf_btf_info *info, int fd,
struct btf_attach_table *btf_prog_table,
struct btf_attach_table *btf_map_table)
struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct btf_attach_point *obj;
struct hashmap_entry *entry;
const char *name = u64_to_ptr(info->name);

jsonw_start_object(json_wtr); /* btf object */
@ -835,23 +815,21 @@ show_btf_json(struct bpf_btf_info *info, int fd,

jsonw_name(json_wtr, "prog_ids");
jsonw_start_array(json_wtr); /* prog_ids */
hash_for_each_possible(btf_prog_table->table, obj, hash,
info->id) {
if (obj->btf_id == info->id)
jsonw_uint(json_wtr, obj->obj_id);
hashmap__for_each_key_entry(btf_prog_table, entry,
u32_as_hash_field(info->id)) {
jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
}
jsonw_end_array(json_wtr); /* prog_ids */

jsonw_name(json_wtr, "map_ids");
jsonw_start_array(json_wtr); /* map_ids */
hash_for_each_possible(btf_map_table->table, obj, hash,
info->id) {
if (obj->btf_id == info->id)
jsonw_uint(json_wtr, obj->obj_id);
hashmap__for_each_key_entry(btf_map_table, entry,
u32_as_hash_field(info->id)) {
jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
}
jsonw_end_array(json_wtr); /* map_ids */

emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */
emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */

jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);

@ -862,8 +840,8 @@ show_btf_json(struct bpf_btf_info *info, int fd,
}

static int
show_btf(int fd, struct btf_attach_table *btf_prog_table,
struct btf_attach_table *btf_map_table)
show_btf(int fd, struct hashmap *btf_prog_table,
struct hashmap *btf_map_table)
{
struct bpf_btf_info info;
__u32 len = sizeof(info);
@ -900,8 +878,8 @@ show_btf(int fd, struct btf_attach_table *btf_prog_table,

static int do_show(int argc, char **argv)
{
struct btf_attach_table btf_prog_table;
struct btf_attach_table btf_map_table;
struct hashmap *btf_prog_table;
struct hashmap *btf_map_table;
int err, fd = -1;
__u32 id = 0;

@ -917,9 +895,19 @@ static int do_show(int argc, char **argv)
return BAD_ARG();
}

hash_init(btf_prog_table.table);
hash_init(btf_map_table.table);
err = build_btf_tables(&btf_prog_table, &btf_map_table);
btf_prog_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
btf_map_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (!btf_prog_table || !btf_map_table) {
hashmap__free(btf_prog_table);
hashmap__free(btf_map_table);
if (fd >= 0)
close(fd);
p_err("failed to create hashmap for object references");
return -1;
}
err = build_btf_tables(btf_prog_table, btf_map_table);
if (err) {
if (fd >= 0)
close(fd);
@ -928,7 +916,7 @@ static int do_show(int argc, char **argv)
build_obj_refs_table(&refs_table, BPF_OBJ_BTF);

if (fd >= 0) {
err = show_btf(fd, &btf_prog_table, &btf_map_table);
err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
goto exit_free;
}
@ -960,7 +948,7 @@ static int do_show(int argc, char **argv)
break;
}

err = show_btf(fd, &btf_prog_table, &btf_map_table);
err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
if (err)
break;
@ -970,9 +958,9 @@ static int do_show(int argc, char **argv)
jsonw_end_array(json_wtr); /* root array */

exit_free:
delete_btf_table(&btf_prog_table);
delete_btf_table(&btf_map_table);
delete_obj_refs_table(&refs_table);
hashmap__free(btf_prog_table);
hashmap__free(btf_map_table);
delete_obj_refs_table(refs_table);

return err;
}

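For context on the btf.c conversion above: bpftool moves from the kernel-style DECLARE_HASHTABLE to libbpf's generic hashmap, which supports duplicate keys via hashmap__append() and per-key iteration via hashmap__for_each_key_entry(). A minimal stand-alone sketch of that pattern follows; it is not part of the commit, and it assumes the libbpf hashmap API of this era, where keys and values are plain void pointers.

#include <stdint.h>
#include <stdio.h>
#include <bpf/hashmap.h>

static size_t hash_id(const void *key, void *ctx)
{
	return (size_t)key;              /* identity hash: the u32 ID is the key */
}

static bool equal_id(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;                 /* keys are packed pointers, compare directly */
}

int main(void)
{
	struct hashmap *map = hashmap__new(hash_id, equal_id, NULL);
	struct hashmap_entry *entry;

	if (!map)
		return 1;

	/* one BTF ID can be attached to several objects, so append
	 * duplicate keys instead of overwriting */
	hashmap__append(map, (void *)(uintptr_t)42, (void *)(uintptr_t)100);
	hashmap__append(map, (void *)(uintptr_t)42, (void *)(uintptr_t)101);

	hashmap__for_each_key_entry(map, entry, (void *)(uintptr_t)42)
		printf("obj id %u\n", (unsigned int)(uintptr_t)entry->value);

	hashmap__free(map);
	return 0;
}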
@ -22,6 +22,7 @@
#include <sys/vfs.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */

#include "main.h"
@ -393,7 +394,7 @@ void print_hex_data_json(uint8_t *data, size_t len)
}

/* extra params for nftw cb */
static struct pinned_obj_table *build_fn_table;
static struct hashmap *build_fn_table;
static enum bpf_obj_type build_fn_type;

static int do_build_table_cb(const char *fpath, const struct stat *sb,
@ -401,9 +402,9 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
{
struct bpf_prog_info pinned_info;
__u32 len = sizeof(pinned_info);
struct pinned_obj *obj_node;
enum bpf_obj_type objtype;
int fd, err = 0;
char *path;

if (typeflag != FTW_F)
goto out_ret;
@ -420,28 +421,26 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
goto out_close;

obj_node = calloc(1, sizeof(*obj_node));
if (!obj_node) {
path = strdup(fpath);
if (!path) {
err = -1;
goto out_close;
}

obj_node->id = pinned_info.id;
obj_node->path = strdup(fpath);
if (!obj_node->path) {
err = -1;
free(obj_node);
err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path);
if (err) {
p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
pinned_info.id, path, strerror(errno));
goto out_close;
}

hash_add(build_fn_table->table, &obj_node->hash, obj_node->id);
out_close:
close(fd);
out_ret:
return err;
}

int build_pinned_obj_table(struct pinned_obj_table *tab,
int build_pinned_obj_table(struct hashmap *tab,
enum bpf_obj_type type)
{
struct mntent *mntent = NULL;
@ -470,17 +469,18 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
return err;
}

void delete_pinned_obj_table(struct pinned_obj_table *tab)
void delete_pinned_obj_table(struct hashmap *map)
{
struct pinned_obj *obj;
struct hlist_node *tmp;
unsigned int bkt;
struct hashmap_entry *entry;
size_t bkt;

hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
hash_del(&obj->hash);
free(obj->path);
free(obj);
}
if (!map)
return;

hashmap__for_each_entry(map, entry, bkt)
free(entry->value);

hashmap__free(map);
}

unsigned int get_page_size(void)
@ -962,3 +962,13 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)

return fd;
}

size_t hash_fn_for_key_as_id(const void *key, void *ctx)
{
return (size_t)key;
}

bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
{
return k1 == k2;
}

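The u32_as_hash_field()/hash_field_as_u32() helpers that the common.c and main.h changes introduce pack a 32-bit ID directly into the hashmap's pointer-sized key or value, avoiding a per-entry allocation. A trivial round-trip check of that packing (hypothetical stand-alone code, not from the commit):

#include <assert.h>
#include <stdint.h>

static inline void *u32_as_hash_field(unsigned int x)
{
	return (void *)(uintptr_t)x;
}

static inline unsigned int hash_field_as_u32(const void *x)
{
	return (unsigned int)(uintptr_t)x;
}

int main(void)
{
	unsigned int id = 0xdeadbeef;

	/* the cast chain through uintptr_t is lossless for 32-bit IDs */
	assert(hash_field_as_u32(u32_as_hash_field(id)) == id);
	return 0;
}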
@ -18,7 +18,6 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
#include <bpf/bpf_gen_internal.h>

#include "json_writer.h"
#include "main.h"
@ -34,6 +33,11 @@ static void sanitize_identifier(char *name)
name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
size_t i, n1 = strlen(str), n2 = strlen(suffix);
@ -68,23 +72,47 @@ static void get_header_guard(char *guard, const char *obj_name)
guard[i] = toupper(guard[i]);
}

static const char *get_map_ident(const struct bpf_map *map)
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
const char *name = bpf_map__name(map);
int i, n;

if (!bpf_map__is_internal(map))
return name;
if (!bpf_map__is_internal(map)) {
snprintf(buf, buf_sz, "%s", name);
return true;
}

if (str_has_suffix(name, ".data"))
return "data";
else if (str_has_suffix(name, ".rodata"))
return "rodata";
else if (str_has_suffix(name, ".bss"))
return "bss";
else if (str_has_suffix(name, ".kconfig"))
return "kconfig";
else
return NULL;
for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
const char *sfx = sfxs[i], *p;

p = strstr(name, sfx);
if (p) {
snprintf(buf, buf_sz, "%s", p + 1);
sanitize_identifier(buf);
return true;
}
}

return false;
}

static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
int i, n;

for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
const char *pfx = pfxs[i];

if (str_has_prefix(sec_name, pfx)) {
snprintf(buf, buf_sz, "%s", sec_name + 1);
sanitize_identifier(buf);
return true;
}
}

return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
@ -101,24 +129,14 @@ static int codegen_datasec_def(struct bpf_object *obj,
const char *sec_name = btf__name_by_offset(btf, sec->name_off);
const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
const char *sec_ident;
char var_ident[256];
char var_ident[256], sec_ident[256];
bool strip_mods = false;

if (strcmp(sec_name, ".data") == 0) {
sec_ident = "data";
strip_mods = true;
} else if (strcmp(sec_name, ".bss") == 0) {
sec_ident = "bss";
strip_mods = true;
} else if (strcmp(sec_name, ".rodata") == 0) {
sec_ident = "rodata";
strip_mods = true;
} else if (strcmp(sec_name, ".kconfig") == 0) {
sec_ident = "kconfig";
} else {
if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
return 0;
}

if (strcmp(sec_name, ".kconfig") != 0)
strip_mods = true;

printf(" struct %s__%s {\n", obj_name, sec_ident);
for (i = 0; i < vlen; i++, sec_var++) {
@ -193,24 +211,63 @@ static int codegen_datasec_def(struct bpf_object *obj,
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
int n = btf__get_nr_types(btf);
int n = btf__type_cnt(btf);
struct btf_dump *d;
struct bpf_map *map;
const struct btf_type *sec;
char sec_ident[256], map_ident[256];
int i, err = 0;

d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
if (IS_ERR(d))
return PTR_ERR(d);

for (i = 1; i <= n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);

if (!btf_is_datasec(t))
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
if (!bpf_map__is_internal(map))
continue;
if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
continue;

err = codegen_datasec_def(obj, btf, d, t, obj_name);
if (err)
goto out;
if (!get_map_ident(map, map_ident, sizeof(map_ident)))
continue;

sec = NULL;
for (i = 1; i < n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;

if (!btf_is_datasec(t))
continue;

name = btf__str_by_offset(btf, t->name_off);
if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
continue;

if (strcmp(sec_ident, map_ident) == 0) {
sec = t;
break;
}
}

/* In some cases (e.g., sections like .rodata.cst16 containing
* compiler allocated string constants only) there will be
* special internal maps with no corresponding DATASEC BTF
* type. In such case, generate empty structs for each such
* map. It will still be memory-mapped and its contents
* accessible from user-space through BPF skeleton.
*/
if (!sec) {
printf(" struct %s__%s {\n", obj_name, map_ident);
printf(" } *%s;\n", map_ident);
} else {
err = codegen_datasec_def(obj, btf, d, sec, obj_name);
if (err)
goto out;
}
}


out:
btf_dump__free(d);
return err;
@ -386,6 +443,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
struct bpf_map *map;
char ident[256];

codegen("\
\n\
@ -406,10 +464,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
}

bpf_object__for_each_map(map, obj) {
const char *ident;

ident = get_map_ident(map);
if (!ident)
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (bpf_map__is_internal(map) &&
(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
@ -433,6 +488,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
struct bpf_object_load_attr load_attr = {};
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
struct bpf_map *map;
char ident[256];
int err = 0;

err = bpf_object__gen_loader(obj, &opts);
@ -478,12 +534,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
",
obj_name, opts.data_sz);
bpf_object__for_each_map(map, obj) {
const char *ident;
const void *mmap_data = NULL;
size_t mmap_size = 0;

ident = get_map_ident(map);
if (!ident)
if (!get_map_ident(map, ident, sizeof(ident)))
continue;

if (!bpf_map__is_internal(map) ||
@ -545,15 +599,15 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
return err; \n\
", obj_name);
bpf_object__for_each_map(map, obj) {
const char *ident, *mmap_flags;
const char *mmap_flags;

ident = get_map_ident(map);
if (!ident)
if (!get_map_ident(map, ident, sizeof(ident)))
continue;

if (!bpf_map__is_internal(map) ||
!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
continue;

if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
mmap_flags = "PROT_READ";
else
@ -603,7 +657,8 @@ static int do_skeleton(int argc, char **argv)
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
const char *file, *ident;
const char *file;
char ident[256];
struct bpf_program *prog;
int fd, err = -1;
struct bpf_map *map;
@ -674,8 +729,7 @@ static int do_skeleton(int argc, char **argv)
}

bpf_object__for_each_map(map, obj) {
ident = get_map_ident(map);
if (!ident) {
if (!get_map_ident(map, ident, sizeof(ident))) {
p_err("ignoring unrecognized internal map '%s'...",
bpf_map__name(map));
continue;
@ -728,8 +782,7 @@ static int do_skeleton(int argc, char **argv)
if (map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_map(map, obj) {
ident = get_map_ident(map);
if (!ident)
if (!get_map_ident(map, ident, sizeof(ident)))
continue;
if (use_loader)
printf("\t\tstruct bpf_map_desc %s;\n", ident);
@ -898,9 +951,7 @@ static int do_skeleton(int argc, char **argv)
);
i = 0;
bpf_object__for_each_map(map, obj) {
ident = get_map_ident(map);

if (!ident)
if (!get_map_ident(map, ident, sizeof(ident)))
continue;

codegen("\

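To illustrate the new get_map_ident()/get_datasec_ident() logic in the gen.c hunks above: instead of exact-matching ".data"/".rodata"/".bss"/".kconfig", the skeleton generator now derives a sanitized C identifier from any internal map or section name containing one of those markers. A hypothetical stand-alone sketch of the suffix-matching and sanitization (not bpftool code; the input name is made up):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum((unsigned char)name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool map_ident_from_name(const char *name, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(sfxs); i++) {
		const char *p = strstr(name, sfxs[i]);

		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1); /* drop the leading '.' */
			sanitize_identifier(buf);
			return true;
		}
	}
	return false;
}

int main(void)
{
	char ident[256];

	if (map_ident_from_name("foo_bpf.rodata.str1.1", ident, sizeof(ident)))
		printf("%s\n", ident); /* prints "rodata_str1_1" */
	return 0;
}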
@ -57,7 +57,7 @@ static int do_pin(int argc, char **argv)
goto close_obj;
}

prog = bpf_program__next(NULL, obj);
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
p_err("can't find bpf program in objfile %s", objfile);
goto close_obj;

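The iter.c hunk above (and a matching one in prog.c further down) switches from the deprecated bpf_program__next() to bpf_object__next_program(), which takes the object first and the iteration cursor second. A small usage sketch under that API; "obj" is assumed to be an already-opened bpf_object:

#include <bpf/libbpf.h>
#include <stdio.h>

void list_programs(struct bpf_object *obj)
{
	struct bpf_program *prog = NULL;

	/* a NULL cursor starts the iteration; a NULL return ends it */
	while ((prog = bpf_object__next_program(obj, prog)))
		printf("prog: %s\n", bpf_program__name(prog));
}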
@ -7,6 +7,7 @@
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
@ -20,6 +21,8 @@ static const char * const link_type_name[] = {
[BPF_LINK_TYPE_NETNS] = "netns",
};

static struct hashmap *link_table;

static int link_parse_fd(int *argc, char ***argv)
{
int fd;
@ -156,19 +159,18 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
break;
}

if (!hash_empty(link_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(link_table)) {
struct hashmap_entry *entry;

jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hash_for_each_possible(link_table.table, obj, hash, info->id) {
if (obj->id == info->id)
jsonw_string(json_wtr, obj->path);
}
hashmap__for_each_key_entry(link_table, entry,
u32_as_hash_field(info->id))
jsonw_string(json_wtr, entry->value);
jsonw_end_array(json_wtr);
}

emit_obj_refs_json(&refs_table, info->id, json_wtr);
emit_obj_refs_json(refs_table, info->id, json_wtr);

jsonw_end_object(json_wtr);

@ -244,15 +246,14 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
break;
}

if (!hash_empty(link_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(link_table)) {
struct hashmap_entry *entry;

hash_for_each_possible(link_table.table, obj, hash, info->id) {
if (obj->id == info->id)
printf("\n\tpinned %s", obj->path);
}
hashmap__for_each_key_entry(link_table, entry,
u32_as_hash_field(info->id))
printf("\n\tpinned %s", (char *)entry->value);
}
emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

printf("\n");

@ -302,8 +303,15 @@ static int do_show(int argc, char **argv)
__u32 id = 0;
int err, fd;

if (show_pinned)
build_pinned_obj_table(&link_table, BPF_OBJ_LINK);
if (show_pinned) {
link_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (!link_table) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(link_table, BPF_OBJ_LINK);
}
build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

if (argc == 2) {
@ -344,7 +352,10 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);

delete_obj_refs_table(&refs_table);
delete_obj_refs_table(refs_table);

if (show_pinned)
delete_pinned_obj_table(link_table);

return errno == ENOENT ? 0 : -1;
}

@ -10,8 +10,9 @@
#include <string.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "main.h"

@ -31,10 +32,7 @@ bool verifier_logs;
bool relaxed_maps;
bool use_loader;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
struct pinned_obj_table link_table;
struct obj_refs_table refs_table;
struct hashmap *refs_table;

static void __noreturn clean_and_exit(int i)
{
@ -409,10 +407,6 @@ int main(int argc, char **argv)
block_mount = false;
bin_name = argv[0];

hash_init(prog_table.table);
hash_init(map_table.table);
hash_init(link_table.table);

opterr = 0;
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
options, NULL)) >= 0) {
@ -479,11 +473,6 @@ int main(int argc, char **argv)
if (json_output)
jsonw_destroy(&json_wtr);

if (show_pinned) {
delete_pinned_obj_table(&prog_table);
delete_pinned_obj_table(&map_table);
delete_pinned_obj_table(&link_table);
}
btf__free(base_btf);

return ret;

@ -11,9 +11,9 @@
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <tools/libc_compat.h>

#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

#include "json_writer.h"
@ -91,10 +91,7 @@ extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
extern struct pinned_obj_table link_table;
extern struct obj_refs_table refs_table;
extern struct hashmap *refs_table;

void __printf(1, 2) p_err(const char *fmt, ...);
void __printf(1, 2) p_info(const char *fmt, ...);
@ -108,28 +105,12 @@ void set_max_rlimit(void);

int mount_tracefs(const char *target);

struct pinned_obj_table {
DECLARE_HASHTABLE(table, 16);
};

struct pinned_obj {
__u32 id;
char *path;
struct hlist_node hash;
};

struct obj_refs_table {
DECLARE_HASHTABLE(table, 16);
};

struct obj_ref {
int pid;
char comm[16];
};

struct obj_refs {
struct hlist_node node;
__u32 id;
int ref_cnt;
struct obj_ref *refs;
};
@ -137,15 +118,15 @@ struct obj_refs {
struct btf;
struct bpf_line_info;

int build_pinned_obj_table(struct pinned_obj_table *table,
int build_pinned_obj_table(struct hashmap *table,
enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
__weak int build_obj_refs_table(struct obj_refs_table *table,
void delete_pinned_obj_table(struct hashmap *table);
__weak int build_obj_refs_table(struct hashmap **table,
enum bpf_obj_type type);
__weak void delete_obj_refs_table(struct obj_refs_table *table);
__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
__weak void delete_obj_refs_table(struct hashmap *table);
__weak void emit_obj_refs_json(struct hashmap *table, __u32 id,
json_writer_t *json_wtr);
__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id,
__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id,
const char *prefix);
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
@ -259,4 +240,23 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,

int print_all_levels(__maybe_unused enum libbpf_print_level level,
const char *format, va_list args);

size_t hash_fn_for_key_as_id(const void *key, void *ctx);
bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);

static inline void *u32_as_hash_field(__u32 x)
{
return (void *)(uintptr_t)x;
}

static inline __u32 hash_field_as_u32(const void *x)
{
return (__u32)(uintptr_t)x;
}

static inline bool hashmap__empty(struct hashmap *map)
{
return map ? hashmap__size(map) == 0 : true;
}

#endif

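The hashmap__empty() helper added to main.h above is deliberately NULL-tolerant: the pinned-path tables are now allocated lazily, only when bpftool runs with -f/--bpffs, so every consumer must treat a NULL table as empty. A stand-alone sketch of the same guard (hypothetical function names, not from the commit):

#include <bpf/hashmap.h>
#include <stdbool.h>

static inline bool table_empty(struct hashmap *map)
{
	/* tables are created lazily, so NULL simply means "empty" */
	return map ? hashmap__size(map) == 0 : true;
}

size_t pinned_path_count(struct hashmap *maybe_table)
{
	/* safe whether or not do_show() ever created the table */
	return table_empty(maybe_table) ? 0 : hashmap__size(maybe_table);
}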
@ -17,6 +17,7 @@

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
@ -56,6 +57,8 @@ const char * const map_type_name[] = {

const size_t map_type_name_size = ARRAY_SIZE(map_type_name);

static struct hashmap *map_table;

static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
@ -535,19 +538,18 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);

if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(map_table)) {
struct hashmap_entry *entry;

jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hash_for_each_possible(map_table.table, obj, hash, info->id) {
if (obj->id == info->id)
jsonw_string(json_wtr, obj->path);
}
hashmap__for_each_key_entry(map_table, entry,
u32_as_hash_field(info->id))
jsonw_string(json_wtr, entry->value);
jsonw_end_array(json_wtr);
}

emit_obj_refs_json(&refs_table, info->id, json_wtr);
emit_obj_refs_json(refs_table, info->id, json_wtr);

jsonw_end_object(json_wtr);

@ -610,13 +612,12 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
}
close(fd);

if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(map_table)) {
struct hashmap_entry *entry;

hash_for_each_possible(map_table.table, obj, hash, info->id) {
if (obj->id == info->id)
printf("\n\tpinned %s", obj->path);
}
hashmap__for_each_key_entry(map_table, entry,
u32_as_hash_field(info->id))
printf("\n\tpinned %s", (char *)entry->value);
}
printf("\n");

@ -636,7 +637,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
if (frozen)
printf("%sfrozen", info->btf_id ? " " : "");

emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

printf("\n");
return 0;
@ -694,8 +695,15 @@ static int do_show(int argc, char **argv)
int err;
int fd;

if (show_pinned)
build_pinned_obj_table(&map_table, BPF_OBJ_MAP);
if (show_pinned) {
map_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (!map_table) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(map_table, BPF_OBJ_MAP);
}
build_obj_refs_table(&refs_table, BPF_OBJ_MAP);

if (argc == 2)
@ -740,7 +748,10 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);

delete_obj_refs_table(&refs_table);
delete_obj_refs_table(refs_table);

if (show_pinned)
delete_pinned_obj_table(map_table);

return errno == ENOENT ? 0 : -1;
}

@ -22,7 +22,6 @@
#include <sys/syscall.h>

#include <bpf/bpf.h>
#include <perf-sys.h>

#include "main.h"

@ -6,35 +6,37 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "main.h"
#include "skeleton/pid_iter.h"

#ifdef BPFTOOL_WITHOUT_SKELETONS

int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
return -ENOTSUP;
}
void delete_obj_refs_table(struct obj_refs_table *table) {}
void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {}
void delete_obj_refs_table(struct hashmap *map) {}
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}

#else /* BPFTOOL_WITHOUT_SKELETONS */

#include "pid_iter.skel.h"

static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
{
struct hashmap_entry *entry;
struct obj_refs *refs;
struct obj_ref *ref;
int err, i;
void *tmp;
int i;

hash_for_each_possible(table->table, refs, node, e->id) {
if (refs->id != e->id)
continue;
hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) {
refs = entry->value;

for (i = 0; i < refs->ref_cnt; i++) {
if (refs->refs[i].pid == e->pid)
@ -64,7 +66,6 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
return;
}

refs->id = e->id;
refs->refs = malloc(sizeof(*refs->refs));
if (!refs->refs) {
free(refs);
@ -76,7 +77,11 @@ static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
ref->pid = e->pid;
memcpy(ref->comm, e->comm, sizeof(ref->comm));
refs->ref_cnt = 1;
hash_add(table->table, &refs->node, e->id);

err = hashmap__append(map, u32_as_hash_field(e->id), refs);
if (err)
p_err("failed to append entry to hashmap for ID %u: %s",
e->id, strerror(errno));
}

static int __printf(2, 0)
@ -87,7 +92,7 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level,
return 0;
}

int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
{
struct pid_iter_entry *e;
char buf[4096 / sizeof(*e) * sizeof(*e)];
@ -95,7 +100,11 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
int err, ret, fd = -1, i;
libbpf_print_fn_t default_print;

hash_init(table->table);
*map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
if (!*map) {
p_err("failed to create hashmap for PID references");
return -1;
}
set_max_rlimit();

skel = pid_iter_bpf__open();
@ -151,7 +160,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)

e = (void *)buf;
for (i = 0; i < ret; i++, e++) {
add_ref(table, e);
add_ref(*map, e);
}
}
err = 0;
@ -162,39 +171,44 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
return err;
}

void delete_obj_refs_table(struct obj_refs_table *table)
void delete_obj_refs_table(struct hashmap *map)
{
struct obj_refs *refs;
struct hlist_node *tmp;
unsigned int bkt;
struct hashmap_entry *entry;
size_t bkt;

if (!map)
return;

hashmap__for_each_entry(map, entry, bkt) {
struct obj_refs *refs = entry->value;

hash_for_each_safe(table->table, bkt, tmp, refs, node) {
hash_del(&refs->node);
free(refs->refs);
free(refs);
}

hashmap__free(map);
}

void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
void emit_obj_refs_json(struct hashmap *map, __u32 id,
json_writer_t *json_writer)
{
struct obj_refs *refs;
struct obj_ref *ref;
int i;
struct hashmap_entry *entry;

if (hash_empty(table->table))
if (hashmap__empty(map))
return;

hash_for_each_possible(table->table, refs, node, id) {
if (refs->id != id)
continue;
hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
struct obj_refs *refs = entry->value;
int i;

if (refs->ref_cnt == 0)
break;

jsonw_name(json_writer, "pids");
jsonw_start_array(json_writer);
for (i = 0; i < refs->ref_cnt; i++) {
ref = &refs->refs[i];
struct obj_ref *ref = &refs->refs[i];

jsonw_start_object(json_writer);
jsonw_int_field(json_writer, "pid", ref->pid);
jsonw_string_field(json_writer, "comm", ref->comm);
@ -205,24 +219,24 @@ void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
}
}

void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix)
void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
{
struct obj_refs *refs;
struct obj_ref *ref;
int i;
struct hashmap_entry *entry;

if (hash_empty(table->table))
if (hashmap__empty(map))
return;

hash_for_each_possible(table->table, refs, node, id) {
if (refs->id != id)
continue;
hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) {
struct obj_refs *refs = entry->value;
int i;

if (refs->ref_cnt == 0)
break;

printf("%s", prefix);
for (i = 0; i < refs->ref_cnt; i++) {
ref = &refs->refs[i];
struct obj_ref *ref = &refs->refs[i];

printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
}
break;

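In the pids.c conversion above, heap-allocated struct obj_refs become hashmap values, so teardown must free the per-entry allocations as well as the map itself. A simplified stand-alone sketch of that ownership pattern (hypothetical struct refs, not bpftool's actual types):

#include <bpf/hashmap.h>
#include <stdlib.h>

struct refs {                 /* simplified stand-in for bpftool's obj_refs */
	int *pids;
	int cnt;
};

void free_refs_table(struct hashmap *map)
{
	struct hashmap_entry *entry;
	size_t bkt;

	if (!map)
		return;

	/* values are owned by the table: free them before freeing the map */
	hashmap__for_each_entry(map, entry, bkt) {
		struct refs *r = entry->value;

		free(r->pids);
		free(r);
	}
	hashmap__free(map);
}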
@ -24,8 +24,8 @@

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include <bpf/bpf_gen_internal.h>
#include <bpf/skel_internal.h>

#include "cfg.h"
@ -85,6 +85,8 @@ static const char * const attach_type_strings[] = {
[__MAX_BPF_ATTACH_TYPE] = NULL,
};

static struct hashmap *prog_table;

static enum bpf_attach_type parse_attach_type(const char *str)
{
enum bpf_attach_type type;
@ -308,18 +310,12 @@ static void show_prog_metadata(int fd, __u32 num_maps)
if (printed_header)
jsonw_end_object(json_wtr);
} else {
json_writer_t *btf_wtr = jsonw_new(stdout);
json_writer_t *btf_wtr;
struct btf_dumper d = {
.btf = btf,
.jw = btf_wtr,
.is_plain_text = true,
};

if (!btf_wtr) {
p_err("jsonw alloc failed");
goto out_free;
}

for (i = 0; i < vlen; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
name = btf__name_by_offset(btf, t_var->name_off);
@ -329,6 +325,14 @@ static void show_prog_metadata(int fd, __u32 num_maps)

if (!printed_header) {
printf("\tmetadata:");

btf_wtr = jsonw_new(stdout);
if (!btf_wtr) {
p_err("jsonw alloc failed");
goto out_free;
}
d.jw = btf_wtr,

printed_header = true;
}

@ -415,19 +419,18 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);

if (!hash_empty(prog_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(prog_table)) {
struct hashmap_entry *entry;

jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
hash_for_each_possible(prog_table.table, obj, hash, info->id) {
if (obj->id == info->id)
jsonw_string(json_wtr, obj->path);
}
hashmap__for_each_key_entry(prog_table, entry,
u32_as_hash_field(info->id))
jsonw_string(json_wtr, entry->value);
jsonw_end_array(json_wtr);
}

emit_obj_refs_json(&refs_table, info->id, json_wtr);
emit_obj_refs_json(refs_table, info->id, json_wtr);

show_prog_metadata(fd, info->nr_map_ids);

@ -487,19 +490,18 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
if (info->nr_map_ids)
show_prog_maps(fd, info->nr_map_ids);

if (!hash_empty(prog_table.table)) {
struct pinned_obj *obj;
if (!hashmap__empty(prog_table)) {
struct hashmap_entry *entry;

hash_for_each_possible(prog_table.table, obj, hash, info->id) {
if (obj->id == info->id)
printf("\n\tpinned %s", obj->path);
}
hashmap__for_each_key_entry(prog_table, entry,
u32_as_hash_field(info->id))
printf("\n\tpinned %s", (char *)entry->value);
}

if (info->btf_id)
printf("\n\tbtf_id %d", info->btf_id);

emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

printf("\n");

@ -566,8 +568,15 @@ static int do_show(int argc, char **argv)
int err;
int fd;

if (show_pinned)
build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
if (show_pinned) {
prog_table = hashmap__new(hash_fn_for_key_as_id,
equal_fn_for_key_as_id, NULL);
if (!prog_table) {
p_err("failed to create hashmap for pinned paths");
return -1;
}
build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
}
build_obj_refs_table(&refs_table, BPF_OBJ_PROG);

if (argc == 2)
@ -610,7 +619,10 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);

delete_obj_refs_table(&refs_table);
delete_obj_refs_table(refs_table);

if (show_pinned)
delete_pinned_obj_table(prog_table);

return err;
}
@ -1601,7 +1613,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;

if (first_prog_only) {
prog = bpf_program__next(NULL, obj);
prog = bpf_object__next_program(obj, NULL);
if (!prog) {
p_err("object file doesn't contain any bpf program");
goto err_close_obj;

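The show_prog_metadata() hunks above also fix a leak: the plain-text JSON writer used to be allocated unconditionally and then dropped on early exits, whereas now it is created only once the metadata header is actually going to be printed. The shape of that fix, reduced to a hypothetical self-contained example:

#include <stdbool.h>
#include <stdlib.h>

struct writer { int unused; };   /* stand-in for json_writer_t */

static struct writer *writer_new(void) { return calloc(1, sizeof(struct writer)); }
static void writer_free(struct writer *w) { free(w); }

static int emit_metadata(int nr_vars)
{
	struct writer *w = NULL;
	bool printed_header = false;
	int i;

	for (i = 0; i < nr_vars; i++) {
		if (!printed_header) {
			/* deferred allocation: nothing leaks when we bail out
			 * early or when there is nothing to print at all */
			w = writer_new();
			if (!w)
				return -1;
			printed_header = true;
		}
		/* ... dump variable i through w ... */
	}

	if (printed_header)
		writer_free(w);
	return 0;
}

int main(void) { return emit_metadata(3); }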
@ -29,25 +29,30 @@ BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a

LIBBPF_DESTDIR := $(LIBBPF_OUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include

BINARY := $(OUTPUT)/resolve_btfids
BINARY_IN := $(BINARY)-in.o

all: $(BINARY)

$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd:
$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
$(call msg,MKDIR,,$@)
$(Q)mkdir -p $(@)

$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) $(abspath $@)
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \
DESTDIR=$(LIBBPF_DESTDIR) prefix= \
$(abspath $@) install_headers

CFLAGS := -g \
-I$(srctree)/tools/include \
-I$(srctree)/tools/include/uapi \
-I$(LIBBPF_SRC) \
-I$(LIBBPF_INCLUDE) \
-I$(SUBCMD_SRC)

LIBS = -lelf -lz
@ -65,7 +70,8 @@ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
clean_objects := $(wildcard $(OUTPUT)/*.o \
$(OUTPUT)/.*.o.cmd \
$(OUTPUT)/.*.o.d \
$(OUTPUT)/libbpf \
$(LIBBPF_OUT) \
$(LIBBPF_DESTDIR) \
$(OUTPUT)/libsubcmd \
$(OUTPUT)/resolve_btfids)

@ -60,8 +60,8 @@
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <btf.h>
#include <libbpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <parse-options.h>

#define BTF_IDS_SECTION ".BTF_ids"
@ -89,6 +89,7 @@ struct btf_id {
struct object {
const char *path;
const char *btf;
const char *base_btf_path;

struct {
int fd;
@ -477,25 +478,36 @@ static int symbols_resolve(struct object *obj)
int nr_structs = obj->nr_structs;
int nr_unions = obj->nr_unions;
int nr_funcs = obj->nr_funcs;
struct btf *base_btf = NULL;
int err, type_id;
struct btf *btf;
__u32 nr_types;

btf = btf__parse(obj->btf ?: obj->path, NULL);
if (obj->base_btf_path) {
base_btf = btf__parse(obj->base_btf_path, NULL);
err = libbpf_get_error(base_btf);
if (err) {
pr_err("FAILED: load base BTF from %s: %s\n",
obj->base_btf_path, strerror(-err));
return -1;
}
}

btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
err = libbpf_get_error(btf);
if (err) {
pr_err("FAILED: load BTF from %s: %s\n",
obj->btf ?: obj->path, strerror(-err));
return -1;
goto out;
}

err = -1;
nr_types = btf__get_nr_types(btf);
nr_types = btf__type_cnt(btf);

/*
* Iterate all the BTF types and search for collected symbol IDs.
*/
for (type_id = 1; type_id <= nr_types; type_id++) {
for (type_id = 1; type_id < nr_types; type_id++) {
const struct btf_type *type;
struct rb_root *root;
struct btf_id *id;
@ -545,6 +557,7 @@ static int symbols_resolve(struct object *obj)

err = 0;
out:
btf__free(base_btf);
btf__free(btf);
return err;
}
@ -678,7 +691,6 @@ static const char * const resolve_btfids_usage[] = {

int main(int argc, const char **argv)
{
bool no_fail = false;
struct object obj = {
.efile = {
.idlist_shndx = -1,
@ -695,8 +707,8 @@ int main(int argc, const char **argv)
"be more verbose (show errors, etc)"),
OPT_STRING(0, "btf", &obj.btf, "BTF data",
"BTF data"),
OPT_BOOLEAN(0, "no-fail", &no_fail,
"do not fail if " BTF_IDS_SECTION " section is not found"),
OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"path of file providing base BTF"),
OPT_END()
};
int err = -1;
@ -717,10 +729,8 @@ int main(int argc, const char **argv)
*/
if (obj.efile.idlist_shndx == -1 ||
obj.efile.symbols_shndx == -1) {
if (no_fail)
return 0;
pr_err("FAILED to find needed sections\n");
return -1;
pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
return 0;
}

if (symbols_collect(&obj))

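The resolve_btfids changes above add a base BTF input (-b) and adopt the new btf__type_cnt() convention, where the returned count includes type ID 0 so iteration uses a strict upper bound, and a split BTF's own IDs start at btf__type_cnt(base). A sketch of the same split-BTF walk with libbpf (the file paths are placeholders):

#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <stdio.h>

int dump_module_type_names(const char *vmlinux_btf_path, const char *module_btf_path)
{
	struct btf *base, *btf;
	__u32 id, cnt;

	base = btf__parse(vmlinux_btf_path, NULL);
	if (libbpf_get_error(base))
		return -1;

	/* module BTF references vmlinux BTF as its base */
	btf = btf__parse_split(module_btf_path, base);
	if (libbpf_get_error(btf)) {
		btf__free(base);
		return -1;
	}

	/* btf__type_cnt() counts type ID 0 (void), so the split part's
	 * valid IDs are [btf__type_cnt(base), btf__type_cnt(btf)) */
	cnt = btf__type_cnt(btf);
	for (id = btf__type_cnt(base); id < cnt; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		printf("[%u] %s\n", id, btf__name_by_offset(btf, t->name_off));
	}

	btf__free(btf);
	btf__free(base);
	return 0;
}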
@ -9,9 +9,9 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../lib/bpf)
BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
BPF_INCLUDE := $(BPFOBJ_OUTPUT)
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \
-I$(abspath ../../include/uapi)
BPF_DESTDIR := $(BPFOBJ_OUTPUT)
BPF_INCLUDE := $(BPF_DESTDIR)/include
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
CFLAGS := -g -Wall

# Try to detect best kernel BTF source
@ -33,7 +33,7 @@ endif

.DELETE_ON_ERROR:

.PHONY: all clean runqslower
.PHONY: all clean runqslower libbpf_hdrs
all: runqslower

runqslower: $(OUTPUT)/runqslower
@ -46,13 +46,15 @@ clean:
$(Q)$(RM) $(OUTPUT)runqslower
$(Q)$(RM) -r .output

libbpf_hdrs: $(BPFOBJ)

$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
$(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@

$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o
$(OUTPUT)/runqslower.bpf.o | libbpf_hdrs

$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h
$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs

$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
$(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
@ -81,8 +83,10 @@ else
endif

$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers

$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
CC=$(HOSTCC) LD=$(HOSTLD)
LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \
LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD)

@ -906,6 +906,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_RINGBUF,
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
BPF_MAP_TYPE_BLOOM_FILTER,
};

/* Note that tracing related programs such as
@ -1274,6 +1275,13 @@ union bpf_attr {
* struct stored as the
* map value
*/
/* Any per-map-type extra fields
*
* BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
* number of hash functions (if 0, the bloom filter will default
* to using 5 hash functions).
*/
__u64 map_extra;
};

struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@ -4909,6 +4917,27 @@ union bpf_attr {
* Return
* The number of bytes written to the buffer, or a negative error
* in case of failure.
*
* struct unix_sock *bpf_skc_to_unix_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *unix_sock* pointer.
* Return
* *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
* Description
* Get the address of a kernel symbol, returned in *res*. *res* is
* set to 0 if the symbol is not found.
* Return
* On success, zero. On error, a negative value.
*
* **-EINVAL** if *flags* is not zero.
*
* **-EINVAL** if string *name* is not the same size as *name_sz*.
*
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -5089,6 +5118,8 @@ union bpf_attr {
FN(task_pt_regs), \
FN(get_branch_snapshot), \
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@ -5613,6 +5644,7 @@ struct bpf_prog_info {
__u64 run_time_ns;
__u64 run_cnt;
__u64 recursion_misses;
__u32 verified_insns;
} __attribute__((aligned(8)));

struct bpf_map_info {
@ -5630,6 +5662,8 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 :32; /* alignment pad */
__u64 map_extra;
} __attribute__((aligned(8)));

struct bpf_btf_info {

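On the new BPF_MAP_TYPE_BLOOM_FILTER UAPI above: bloom filter maps are keyless, and map_extra carries the number of hash functions in its low 4 bits. A hedged sketch of creating one through the raw bpf(2) syscall, using only fields visible in this header (libbpf wrappers of this era may expose the same knob differently):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bloom_create(__u32 value_size, __u32 max_entries, __u64 nr_hashes)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_BLOOM_FILTER;
	attr.key_size = 0;                /* bloom filters have no keys */
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_extra = nr_hashes;       /* lowest 4 bits; 0 defaults to 5 hashes */

	return (int)syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}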
@@ -43,7 +43,7 @@ struct btf_type {
 	 * "size" tells the size of the type it is describing.
 	 *
 	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
-	 * FUNC, FUNC_PROTO, VAR and TAG.
+	 * FUNC, FUNC_PROTO, VAR and DECL_TAG.
 	 * "type" is a type_id referring to another type.
 	 */
 	union {
@@ -74,7 +74,7 @@ enum {
 	BTF_KIND_VAR		= 14,	/* Variable	*/
 	BTF_KIND_DATASEC	= 15,	/* Section	*/
 	BTF_KIND_FLOAT		= 16,	/* Floating point	*/
-	BTF_KIND_TAG		= 17,	/* Tag */
+	BTF_KIND_DECL_TAG	= 17,	/* Decl Tag */

 	NR_BTF_KINDS,
 	BTF_KIND_MAX		= NR_BTF_KINDS - 1,
@@ -174,14 +174,14 @@ struct btf_var_secinfo {
 	__u32	size;
 };

-/* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe
+/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
  * additional information related to the tag applied location.
  * If component_idx == -1, the tag is applied to a struct, union,
  * variable or function. Otherwise, it is applied to a struct/union
  * member or a func argument, and component_idx indicates which member
  * or argument (0 ... vlen-1).
  */
-struct btf_tag {
+struct btf_decl_tag {
 	__s32   component_idx;
 };
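Recent clang emits this kind for the btf_decl_tag attribute; a hedged source-level example of what produces these records (tag strings here are invented):

	/* Requires a clang with btf_decl_tag support. */
	#define __tag(x) __attribute__((btf_decl_tag(x)))

	struct pkt_ctx {
		void *data __tag("user_visible");	/* component_idx 0 */
		int   len;
	} __tag("ctx");					/* component_idx -1 */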
@@ -146,12 +146,6 @@ $(BPF_IN_SHARED): force $(BPF_GENERATED)
 	@(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
 	(diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
 	echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true
-	@(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \
-	(diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \
-	echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true
-	@(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
-	(diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
-	echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
 	@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
 	(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
 	echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
@@ -208,8 +202,8 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
 		exit 1; \
 	fi

-HDR_MAJ_VERSION := $(shell grep -oE '^\#define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
-HDR_MIN_VERSION := $(shell grep -oE '^\#define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+HDR_MAJ_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+HDR_MIN_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)

 check_version: $(VERSION_SCRIPT) libbpf_version.h
 	@if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \
@@ -241,15 +235,24 @@ install_lib: all_cmd
 		$(call do_install_mkdir,$(libdir_SQ)); \
 		cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)

-INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
-		  bpf_helpers.h $(BPF_GENERATED) bpf_tracing.h \
-		  bpf_endian.h bpf_core_read.h skel_internal.h \
-		  libbpf_version.h
+SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
+	    bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
+	    skel_internal.h libbpf_version.h
+GEN_HDRS := $(BPF_GENERATED)

-install_headers: $(BPF_GENERATED)
-	$(call QUIET_INSTALL, headers) \
-	$(foreach hdr,$(INSTALL_HEADERS), \
-		$(call do_install,$(hdr),$(prefix)/include/bpf,644);)
+INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
+INSTALL_SRC_HDRS := $(addprefix $(INSTALL_PFX)/, $(SRC_HDRS))
+INSTALL_GEN_HDRS := $(addprefix $(INSTALL_PFX)/, $(notdir $(GEN_HDRS)))
+
+$(INSTALL_SRC_HDRS): $(INSTALL_PFX)/%.h: %.h
+	$(call QUIET_INSTALL, $@) \
+		$(call do_install,$<,$(prefix)/include/bpf,644)
+
+$(INSTALL_GEN_HDRS): $(INSTALL_PFX)/%.h: $(OUTPUT)%.h
+	$(call QUIET_INSTALL, $@) \
+		$(call do_install,$<,$(prefix)/include/bpf,644)
+
+install_headers: $(BPF_GENERATED) $(INSTALL_SRC_HDRS) $(INSTALL_GEN_HDRS)

 install_pkgconfig: $(PC_FILE)
 	$(call QUIET_INSTALL, $(PC_FILE)) \
@@ -65,19 +65,28 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 	return syscall(__NR_bpf, cmd, attr, size);
 }

+static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
+			     unsigned int size)
+{
+	int fd;
+
+	fd = sys_bpf(cmd, attr, size);
+	return ensure_good_fd(fd);
+}
+
 static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
 {
 	int retries = 5;
 	int fd;

 	do {
-		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
 	} while (fd < 0 && errno == EAGAIN && retries-- > 0);

 	return fd;
 }

-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr)
 {
 	union bpf_attr attr;
 	int fd;
@@ -102,11 +111,36 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 			create_attr->btf_vmlinux_value_type_id;
 	else
 		attr.inner_map_fd = create_attr->inner_map_fd;
+	attr.map_extra = create_attr->map_extra;

-	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

+int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+{
+	struct bpf_create_map_params p = {};
+
+	p.map_type = create_attr->map_type;
+	p.key_size = create_attr->key_size;
+	p.value_size = create_attr->value_size;
+	p.max_entries = create_attr->max_entries;
+	p.map_flags = create_attr->map_flags;
+	p.name = create_attr->name;
+	p.numa_node = create_attr->numa_node;
+	p.btf_fd = create_attr->btf_fd;
+	p.btf_key_type_id = create_attr->btf_key_type_id;
+	p.btf_value_type_id = create_attr->btf_value_type_id;
+	p.map_ifindex = create_attr->map_ifindex;
+	if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS)
+		p.btf_vmlinux_value_type_id =
+			create_attr->btf_vmlinux_value_type_id;
+	else
+		p.inner_map_fd = create_attr->inner_map_fd;
+
+	return libbpf__bpf_create_map_xattr(&p);
+}
+
 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
 			int key_size, int value_size, int max_entries,
 			__u32 map_flags, int node)
@@ -181,7 +215,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
 		attr.numa_node = node;
 	}

-	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -264,6 +298,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
 	attr.line_info_rec_size = load_attr->line_info_rec_size;
 	attr.line_info_cnt = load_attr->line_info_cnt;
 	attr.line_info = ptr_to_u64(load_attr->line_info);
+	attr.fd_array = ptr_to_u64(load_attr->fd_array);

 	if (load_attr->name)
 		memcpy(attr.prog_name, load_attr->name,
@@ -608,7 +643,7 @@ int bpf_obj_get(const char *pathname)
 	memset(&attr, 0, sizeof(attr));
 	attr.pathname = ptr_to_u64((void *)pathname);

-	fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -719,7 +754,7 @@ int bpf_link_create(int prog_fd, int target_fd,
 		break;
 	}
 proceed:
-	fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -762,7 +797,7 @@ int bpf_iter_create(int link_fd)
 	memset(&attr, 0, sizeof(attr));
 	attr.iter_create.link_fd = link_fd;

-	fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -920,7 +955,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_id = id;

-	fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -932,7 +967,7 @@ int bpf_map_get_fd_by_id(__u32 id)
 	memset(&attr, 0, sizeof(attr));
 	attr.map_id = id;

-	fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -944,7 +979,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
 	memset(&attr, 0, sizeof(attr));
 	attr.btf_id = id;

-	fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -956,7 +991,7 @@ int bpf_link_get_fd_by_id(__u32 id)
 	memset(&attr, 0, sizeof(attr));
 	attr.link_id = id;

-	fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -987,7 +1022,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
 	attr.raw_tracepoint.name = ptr_to_u64(name);
 	attr.raw_tracepoint.prog_fd = prog_fd;

-	fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -1007,7 +1042,7 @@ int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_s
 		attr.btf_log_buf = ptr_to_u64(log_buf);
 	}

-	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));

 	if (fd < 0 && !do_log && log_buf && log_buf_size) {
 		do_log = true;
@@ -1049,7 +1084,7 @@ int bpf_enable_stats(enum bpf_stats_type type)
 	memset(&attr, 0, sizeof(attr));
 	attr.enable_stats.type = type;

-	fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
+	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
 	return libbpf_err_errno(fd);
 }

@@ -40,7 +40,7 @@ enum bpf_enum_value_kind {
 #define __CORE_RELO(src, field, info)					      \
 	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
 	bpf_probe_read_kernel(						      \
 			(void *)dst,					      \
@@ -7,6 +7,21 @@ struct ksym_relo_desc {
 	const char *name;
 	int kind;
 	int insn_idx;
+	bool is_weak;
+	bool is_typeless;
 };

+struct ksym_desc {
+	const char *name;
+	int ref;
+	int kind;
+	union {
+		/* used for kfunc */
+		int off;
+		/* used for typeless ksym */
+		bool typeless;
+	};
+	int insn;
+};
+
 struct bpf_gen {
@@ -24,18 +39,23 @@ struct bpf_gen {
 	int relo_cnt;
 	char attach_target[128];
 	int attach_kind;
+	struct ksym_desc *ksyms;
+	__u32 nr_ksyms;
+	int fd_array;
+	int nr_fd_array;
 };

 void bpf_gen__init(struct bpf_gen *gen, int log_level);
 int bpf_gen__finish(struct bpf_gen *gen);
 void bpf_gen__free(struct bpf_gen *gen);
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
-void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
+void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
 struct bpf_prog_load_params;
 void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
 void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
 void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
 void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+			    bool is_typeless, int kind, int insn_idx);

 #endif
@@ -24,6 +24,9 @@
 #elif defined(__TARGET_ARCH_sparc)
 	#define bpf_target_sparc
 	#define bpf_target_defined
+#elif defined(__TARGET_ARCH_riscv)
+	#define bpf_target_riscv
+	#define bpf_target_defined
 #else

 /* Fall back to what the compiler says */
@@ -48,6 +51,9 @@
 #elif defined(__sparc__)
 	#define bpf_target_sparc
 	#define bpf_target_defined
+#elif defined(__riscv) && __riscv_xlen == 64
+	#define bpf_target_riscv
+	#define bpf_target_defined
 #endif /* no compiler target */

 #endif
@@ -288,6 +294,32 @@ struct pt_regs;
 #define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
 #endif

+#elif defined(bpf_target_riscv)
+
+struct pt_regs;
+#define PT_REGS_RV const volatile struct user_regs_struct
+#define PT_REGS_PARM1(x) (((PT_REGS_RV *)(x))->a0)
+#define PT_REGS_PARM2(x) (((PT_REGS_RV *)(x))->a1)
+#define PT_REGS_PARM3(x) (((PT_REGS_RV *)(x))->a2)
+#define PT_REGS_PARM4(x) (((PT_REGS_RV *)(x))->a3)
+#define PT_REGS_PARM5(x) (((PT_REGS_RV *)(x))->a4)
+#define PT_REGS_RET(x) (((PT_REGS_RV *)(x))->ra)
+#define PT_REGS_FP(x) (((PT_REGS_RV *)(x))->s5)
+#define PT_REGS_RC(x) (((PT_REGS_RV *)(x))->a5)
+#define PT_REGS_SP(x) (((PT_REGS_RV *)(x))->sp)
+#define PT_REGS_IP(x) (((PT_REGS_RV *)(x))->epc)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a0)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a1)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a2)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a3)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a4)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), ra)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), fp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), a5)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_RV *)(x), epc)
+
 #endif

 #if defined(bpf_target_powerpc)
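A sketch of a kprobe program that now builds for RV64 when compiled with -D__TARGET_ARCH_riscv; BPF_KPROBE() expands to the PT_REGS_PARM*() macros above, and "vmlinux.h" is assumed to be generated separately:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("kprobe/do_unlinkat")
	int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
	{
		/* dfd is fetched from register a0 on riscv */
		bpf_printk("do_unlinkat dfd: %d", dfd);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";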
@@ -57,7 +57,7 @@ struct btf {
 	 * representation is broken up into three independently allocated
 	 * memory regions to be able to modify them independently.
 	 * raw_data is nulled out at that point, but can be later allocated
-	 * and cached again if user calls btf__get_raw_data(), at which point
+	 * and cached again if user calls btf__raw_data(), at which point
 	 * raw_data will contain a contiguous copy of header, types, and
 	 * strings:
 	 *
@@ -189,12 +189,17 @@ int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_
 	return 0;
 }

+static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
+{
+	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
+			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
+}
+
 static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
 {
 	__u32 *p;

-	p = libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
-			   btf->nr_types, BTF_MAX_NR_TYPES, 1);
+	p = btf_add_type_offs_mem(btf, 1);
 	if (!p)
 		return -ENOMEM;

@@ -231,17 +236,23 @@ static int btf_parse_hdr(struct btf *btf)
 		}
 		btf_bswap_hdr(hdr);
 	} else if (hdr->magic != BTF_MAGIC) {
-		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
+		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
 		return -EINVAL;
 	}

-	meta_left = btf->raw_size - sizeof(*hdr);
-	if (meta_left < hdr->str_off + hdr->str_len) {
-		pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
+	if (btf->raw_size < hdr->hdr_len) {
+		pr_debug("BTF header len %u larger than data size %u\n",
+			 hdr->hdr_len, btf->raw_size);
 		return -EINVAL;
 	}

-	if (hdr->type_off + hdr->type_len > hdr->str_off) {
+	meta_left = btf->raw_size - hdr->hdr_len;
+	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
+		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
+		return -EINVAL;
+	}
+
+	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
 		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
 			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
 		return -EINVAL;
@@ -304,8 +315,8 @@ static int btf_type_size(const struct btf_type *t)
 		return base_size + sizeof(struct btf_var);
 	case BTF_KIND_DATASEC:
 		return base_size + vlen * sizeof(struct btf_var_secinfo);
-	case BTF_KIND_TAG:
-		return base_size + sizeof(struct btf_tag);
+	case BTF_KIND_DECL_TAG:
+		return base_size + sizeof(struct btf_decl_tag);
 	default:
 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
 		return -EINVAL;
@@ -378,8 +389,8 @@ static int btf_bswap_type_rest(struct btf_type *t)
 			v->size = bswap_32(v->size);
 		}
 		return 0;
-	case BTF_KIND_TAG:
-		btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx);
+	case BTF_KIND_DECL_TAG:
+		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
 		return 0;
 	default:
 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
@@ -430,6 +441,11 @@ __u32 btf__get_nr_types(const struct btf *btf)
 	return btf->start_id + btf->nr_types - 1;
 }

+__u32 btf__type_cnt(const struct btf *btf)
+{
+	return btf->start_id + btf->nr_types;
+}
+
 const struct btf *btf__base_btf(const struct btf *btf)
 {
 	return btf->base_btf;
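The new counting API shifts iteration bounds by one, as the loop conversions below in this patch show. A minimal migration sketch (variable names illustrative):

	__u32 i;
	const struct btf_type *t;

	/* deprecated: IDs run from 1 to btf__get_nr_types(btf), inclusive */
	for (i = 1; i <= btf__get_nr_types(btf); i++)
		t = btf__type_by_id(btf, i);

	/* new: btf__type_cnt() also counts the implicit 'void' type with
	 * ID 0, so the upper bound is exclusive */
	for (i = 1; i < btf__type_cnt(btf); i++)
		t = btf__type_by_id(btf, i);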
@@ -461,8 +477,8 @@ static int determine_ptr_size(const struct btf *btf)
 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
 		return btf->base_btf->ptr_sz;

-	n = btf__get_nr_types(btf);
-	for (i = 1; i <= n; i++) {
+	n = btf__type_cnt(btf);
+	for (i = 1; i < n; i++) {
 		t = btf__type_by_id(btf, i);
 		if (!btf_is_int(t))
 			continue;
@@ -522,9 +538,9 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)

 static bool is_host_big_endian(void)
 {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 	return false;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 	return true;
 #else
 # error "Unrecognized __BYTE_ORDER__"
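A brief note on the macro switch seen here and in the other files: the new spellings are compiler built-ins, so a sketch like the following compiles without pulling in any libc header (the variable is illustrative):

	/* __BYTE_ORDER__ and __ORDER_*_ENDIAN__ are predefined by the
	 * compiler, no <endian.h> include needed: */
	#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	static const int host_is_little_endian = 1;
	#else
	static const int host_is_little_endian = 0;
	#endif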
@@ -591,7 +607,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
 		case BTF_KIND_CONST:
 		case BTF_KIND_RESTRICT:
 		case BTF_KIND_VAR:
-		case BTF_KIND_TAG:
+		case BTF_KIND_DECL_TAG:
 			type_id = t->type;
 			break;
 		case BTF_KIND_ARRAY:
@@ -679,12 +695,12 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)

 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 {
-	__u32 i, nr_types = btf__get_nr_types(btf);
+	__u32 i, nr_types = btf__type_cnt(btf);

 	if (!strcmp(type_name, "void"))
 		return 0;

-	for (i = 1; i <= nr_types; i++) {
+	for (i = 1; i < nr_types; i++) {
 		const struct btf_type *t = btf__type_by_id(btf, i);
 		const char *name = btf__name_by_offset(btf, t->name_off);

@@ -695,15 +711,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 	return libbpf_err(-ENOENT);
 }

-__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
-			     __u32 kind)
+static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
+				   const char *type_name, __u32 kind)
 {
-	__u32 i, nr_types = btf__get_nr_types(btf);
+	__u32 i, nr_types = btf__type_cnt(btf);

 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
 		return 0;

-	for (i = 1; i <= nr_types; i++) {
+	for (i = start_id; i < nr_types; i++) {
 		const struct btf_type *t = btf__type_by_id(btf, i);
 		const char *name;

@@ -717,6 +733,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
 	return libbpf_err(-ENOENT);
 }

+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+				 __u32 kind)
+{
+	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
+}
+
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+			     __u32 kind)
+{
+	return btf_find_by_name_kind(btf, 1, type_name, kind);
+}
+
 static bool btf_is_modifiable(const struct btf *btf)
 {
 	return (void *)btf->hdr != btf->raw_data;
@@ -764,7 +792,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)

 	if (base_btf) {
 		btf->base_btf = base_btf;
-		btf->start_id = btf__get_nr_types(base_btf) + 1;
+		btf->start_id = btf__type_cnt(base_btf);
 		btf->start_str_off = base_btf->hdr->str_len;
 	}

@@ -814,7 +842,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)

 	if (base_btf) {
 		btf->base_btf = base_btf;
-		btf->start_id = btf__get_nr_types(base_btf) + 1;
+		btf->start_id = btf__type_cnt(base_btf);
 		btf->start_str_off = base_btf->hdr->str_len;
 	}

@@ -869,7 +897,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
 	}

-	fd = open(path, O_RDONLY);
+	fd = open(path, O_RDONLY | O_CLOEXEC);
 	if (fd < 0) {
 		err = -errno;
 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
@@ -1090,99 +1118,6 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
 	return libbpf_ptr(btf_parse(path, base_btf, NULL));
 }

-static int compare_vsi_off(const void *_a, const void *_b)
-{
-	const struct btf_var_secinfo *a = _a;
-	const struct btf_var_secinfo *b = _b;
-
-	return a->offset - b->offset;
-}
-
-static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
-			     struct btf_type *t)
-{
-	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
-	const char *name = btf__name_by_offset(btf, t->name_off);
-	const struct btf_type *t_var;
-	struct btf_var_secinfo *vsi;
-	const struct btf_var *var;
-	int ret;
-
-	if (!name) {
-		pr_debug("No name found in string section for DATASEC kind.\n");
-		return -ENOENT;
-	}
-
-	/* .extern datasec size and var offsets were set correctly during
-	 * extern collection step, so just skip straight to sorting variables
-	 */
-	if (t->size)
-		goto sort_vars;
-
-	ret = bpf_object__section_size(obj, name, &size);
-	if (ret || !size || (t->size && t->size != size)) {
-		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
-		return -ENOENT;
-	}
-
-	t->size = size;
-
-	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
-		t_var = btf__type_by_id(btf, vsi->type);
-		var = btf_var(t_var);
-
-		if (!btf_is_var(t_var)) {
-			pr_debug("Non-VAR type seen in section %s\n", name);
-			return -EINVAL;
-		}
-
-		if (var->linkage == BTF_VAR_STATIC)
-			continue;
-
-		name = btf__name_by_offset(btf, t_var->name_off);
-		if (!name) {
-			pr_debug("No name found in string section for VAR kind\n");
-			return -ENOENT;
-		}
-
-		ret = bpf_object__variable_offset(obj, name, &off);
-		if (ret) {
-			pr_debug("No offset found in symbol table for VAR %s\n",
-				 name);
-			return -ENOENT;
-		}
-
-		vsi->offset = off;
-	}
-
-sort_vars:
-	qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
-	return 0;
-}
-
-int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
-{
-	int err = 0;
-	__u32 i;
-
-	for (i = 1; i <= btf->nr_types; i++) {
-		struct btf_type *t = btf_type_by_id(btf, i);
-
-		/* Loader needs to fix up some of the things compiler
-		 * couldn't get its hands on while emitting BTF. This
-		 * is section size and global variable offset. We use
-		 * the info from the ELF itself for this purpose.
-		 */
-		if (btf_is_datasec(t)) {
-			err = btf_fixup_datasec(obj, btf, t);
-			if (err)
-				break;
-		}
-	}
-
-	return libbpf_err(err);
-}
-
 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);

 int btf__load_into_kernel(struct btf *btf)
@@ -1300,7 +1235,7 @@ static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endi
 	return NULL;
 }

-const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
+const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
 {
 	struct btf *btf = (struct btf *)btf_ro;
 	__u32 data_sz;
@@ -1308,7 +1243,7 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)

 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
 	if (!data)
-		return errno = -ENOMEM, NULL;
+		return errno = ENOMEM, NULL;

 	btf->raw_size = data_sz;
 	if (btf->swapped_endian)
@@ -1319,6 +1254,9 @@ const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
 	return data;
 }

+__attribute__((alias("btf__raw_data")))
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+
 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
 {
 	if (offset < btf->start_str_off)
@@ -1691,6 +1629,111 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t
 	return btf_commit_type(btf, sz);
 }

+static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
+{
+	struct btf *btf = ctx;
+
+	if (!*type_id) /* nothing to do for VOID references */
+		return 0;
+
+	/* we haven't updated btf's type count yet, so
+	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+	 * add to all newly added BTF types
+	 */
+	*type_id += btf->start_id + btf->nr_types - 1;
+	return 0;
+}
+
+int btf__add_btf(struct btf *btf, const struct btf *src_btf)
+{
+	struct btf_pipe p = { .src = src_btf, .dst = btf };
+	int data_sz, sz, cnt, i, err, old_strs_len;
+	__u32 *off;
+	void *t;
+
+	/* appending split BTF isn't supported yet */
+	if (src_btf->base_btf)
+		return libbpf_err(-ENOTSUP);
+
+	/* deconstruct BTF, if necessary, and invalidate raw_data */
+	if (btf_ensure_modifiable(btf))
+		return libbpf_err(-ENOMEM);
+
+	/* remember original strings section size if we have to roll back
+	 * partial strings section changes
+	 */
+	old_strs_len = btf->hdr->str_len;
+
+	data_sz = src_btf->hdr->type_len;
+	cnt = btf__type_cnt(src_btf) - 1;
+
+	/* pre-allocate enough memory for new types */
+	t = btf_add_type_mem(btf, data_sz);
+	if (!t)
+		return libbpf_err(-ENOMEM);
+
+	/* pre-allocate enough memory for type offset index for new types */
+	off = btf_add_type_offs_mem(btf, cnt);
+	if (!off)
+		return libbpf_err(-ENOMEM);
+
+	/* bulk copy types data for all types from src_btf */
+	memcpy(t, src_btf->types_data, data_sz);
+
+	for (i = 0; i < cnt; i++) {
+		sz = btf_type_size(t);
+		if (sz < 0) {
+			/* unlikely, has to be corrupted src_btf */
+			err = sz;
+			goto err_out;
+		}
+
+		/* fill out type ID to type offset mapping for lookups by type ID */
+		*off = t - btf->types_data;
+
+		/* add, dedup, and remap strings referenced by this BTF type */
+		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+		if (err)
+			goto err_out;
+
+		/* remap all type IDs referenced from this BTF type */
+		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
+		if (err)
+			goto err_out;
+
+		/* go to next type data and type offset index entry */
+		t += sz;
+		off++;
+	}
+
+	/* Up until now any of the copied type data was effectively invisible,
+	 * so if we exited early before this point due to error, BTF would be
+	 * effectively unmodified. There would be extra internal memory
+	 * pre-allocated, but it would not be available for querying. But now
+	 * that we've copied and rewritten all the data successfully, we can
+	 * update type count and various internal offsets and sizes to
+	 * "commit" the changes and made them visible to the outside world.
+	 */
+	btf->hdr->type_len += data_sz;
+	btf->hdr->str_off += data_sz;
+	btf->nr_types += cnt;
+
+	/* return type ID of the first added BTF type */
+	return btf->start_id + btf->nr_types - cnt;
+err_out:
+	/* zero out preallocated memory as if it was just allocated with
+	 * libbpf_add_mem()
+	 */
+	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
+	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
+
+	/* and now restore original strings section size; types data size
+	 * wasn't modified, so doesn't need restoring, see big comment above */
+	btf->hdr->str_len = old_strs_len;
+
+	return libbpf_err(err);
+}
+
 /*
  * Append new BTF_KIND_INT type with:
  * - *name* - non-empty, non-NULL type name;
@@ -1939,7 +1982,7 @@ int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)

 static struct btf_type *btf_last_type(struct btf *btf)
 {
-	return btf_type_by_id(btf, btf__get_nr_types(btf));
+	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
 }

 /*
@@ -2447,7 +2490,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
 }

 /*
- * Append new BTF_KIND_TAG type with:
+ * Append new BTF_KIND_DECL_TAG type with:
  * - *value* - non-empty/non-NULL string;
  * - *ref_type_id* - referenced type ID, it might not exist yet;
  * - *component_idx* - -1 for tagging reference type, otherwise struct/union
@@ -2456,7 +2499,7 @@ int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __
  * - >0, type ID of newly added BTF type;
  * - <0, on error.
  */
-int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
+int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
 		 int component_idx)
 {
 	struct btf_type *t;
@@ -2471,7 +2514,7 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
 	if (btf_ensure_modifiable(btf))
 		return libbpf_err(-ENOMEM);

-	sz = sizeof(struct btf_type) + sizeof(struct btf_tag);
+	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
 	t = btf_add_type_mem(btf, sz);
 	if (!t)
 		return libbpf_err(-ENOMEM);
@@ -2481,9 +2524,9 @@ int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
 		return value_off;

 	t->name_off = value_off;
-	t->info = btf_type_info(BTF_KIND_TAG, 0, false);
+	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
 	t->type = ref_type_id;
-	btf_tag(t)->component_idx = component_idx;
+	btf_decl_tag(t)->component_idx = component_idx;

 	return btf_commit_type(btf, sz);
 }
@@ -2962,8 +3005,10 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
 		return libbpf_err(-EINVAL);
 	}

-	if (btf_ensure_modifiable(btf))
-		return libbpf_err(-ENOMEM);
+	if (btf_ensure_modifiable(btf)) {
+		err = -ENOMEM;
+		goto done;
+	}

 	err = btf_dedup_prep(d);
 	if (err) {
@@ -3143,7 +3188,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
 		goto done;
 	}

-	type_cnt = btf__get_nr_types(btf) + 1;
+	type_cnt = btf__type_cnt(btf);
 	d->map = malloc(sizeof(__u32) * type_cnt);
 	if (!d->map) {
 		err = -ENOMEM;
@@ -3305,7 +3350,7 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
 }

 /* Calculate type signature hash of INT or TAG. */
-static long btf_hash_int_tag(struct btf_type *t)
+static long btf_hash_int_decl_tag(struct btf_type *t)
 {
 	__u32 info = *(__u32 *)(t + 1);
 	long h;
@@ -3583,8 +3628,8 @@ static int btf_dedup_prep(struct btf_dedup *d)
 			h = btf_hash_common(t);
 			break;
 		case BTF_KIND_INT:
-		case BTF_KIND_TAG:
-			h = btf_hash_int_tag(t);
+		case BTF_KIND_DECL_TAG:
+			h = btf_hash_int_decl_tag(t);
 			break;
 		case BTF_KIND_ENUM:
 			h = btf_hash_enum(t);
@@ -3639,11 +3684,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 	case BTF_KIND_FUNC_PROTO:
 	case BTF_KIND_VAR:
 	case BTF_KIND_DATASEC:
-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		return 0;

 	case BTF_KIND_INT:
-		h = btf_hash_int_tag(t);
+		h = btf_hash_int_decl_tag(t);
 		for_each_dedup_cand(d, hash_entry, h) {
 			cand_id = (__u32)(long)hash_entry->value;
 			cand = btf_type_by_id(d->btf, cand_id);
@@ -4260,13 +4305,13 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		}
 		break;

-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		ref_type_id = btf_dedup_ref_type(d, t->type);
 		if (ref_type_id < 0)
 			return ref_type_id;
 		t->type = ref_type_id;

-		h = btf_hash_int_tag(t);
+		h = btf_hash_int_decl_tag(t);
 		for_each_dedup_cand(d, hash_entry, h) {
 			cand_id = (__u32)(long)hash_entry->value;
 			cand = btf_type_by_id(d->btf, cand_id);
@@ -4549,7 +4594,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
 	case BTF_KIND_VAR:
-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		return visit(&t->type, ctx);

 	case BTF_KIND_ARRAY: {
@@ -123,6 +123,7 @@ LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *b
 LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
 LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);

+LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
 LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
 LIBBPF_API int btf__load(struct btf *btf);
@@ -131,7 +132,9 @@ LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
 					const char *type_name);
 LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
 					const char *type_name, __u32 kind);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
 LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
 LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
 						  __u32 id);
@@ -144,7 +147,9 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
 LIBBPF_API int btf__fd(const struct btf *btf);
 LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead")
 LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
 LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
@@ -173,6 +178,28 @@ LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
 			     const struct btf_type *src_type);
+/**
+ * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf*
+ * @param btf BTF object which all the BTF types and strings are added to
+ * @param src_btf BTF object which all BTF types and referenced strings are copied from
+ * @return BTF type ID of the first appended BTF type, or negative error code
+ *
+ * **btf__add_btf()** can be used to simply and efficiently append the entire
+ * contents of one BTF object to another one. All the BTF type data is copied
+ * over, all referenced type IDs are adjusted by adding a necessary ID offset.
+ * Only strings referenced from BTF types are copied over and deduplicated, so
+ * if there were some unused strings in *src_btf*, those won't be copied over,
+ * which is consistent with the general string deduplication semantics of BTF
+ * writing APIs.
+ *
+ * If any error is encountered during this process, the contents of *btf* is
+ * left intact, which means that **btf__add_btf()** follows the transactional
+ * semantics and the operation as a whole is all-or-nothing.
+ *
+ * *src_btf* has to be non-split BTF, as of now copying types from split BTF
+ * is not supported and will result in -ENOTSUP error code returned.
+ */
+LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf);

 LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
 LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
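A usage sketch under the documented semantics (the file names are placeholders):

	#include <stdio.h>
	#include <bpf/btf.h>

	static int merge_btf(void)
	{
		struct btf *dst = btf__parse("base.btf", NULL);
		struct btf *src = btf__parse("extra.btf", NULL);
		int first_id = -1;

		if (dst && src) {
			first_id = btf__add_btf(dst, src);
			if (first_id < 0)	/* dst left intact on error */
				fprintf(stderr, "append failed: %d\n", first_id);
			else
				printf("first new type ID: %d\n", first_id);
		}
		btf__free(src);
		btf__free(dst);
		return first_id;
	}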
@@ -214,7 +241,7 @@ LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
 				 __u32 offset, __u32 byte_sz);

 /* tag construction API */
-LIBBPF_API int btf__add_tag(struct btf *btf, const char *value, int ref_type_id,
+LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
 			    int component_idx);

 struct btf_dedup_opts {
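A short usage sketch of the renamed API (identifiers and tag strings are illustrative):

	/* Given a BTF object 'btf' and the type ID 'struct_id' of a struct,
	 * tag its first member and then the struct itself: */
	int tag_member = btf__add_decl_tag(btf, "user_ptr", struct_id, 0);
	int tag_struct = btf__add_decl_tag(btf, "kernel_only", struct_id, -1);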
@@ -404,9 +431,9 @@ static inline bool btf_is_float(const struct btf_type *t)
 	return btf_kind(t) == BTF_KIND_FLOAT;
 }

-static inline bool btf_is_tag(const struct btf_type *t)
+static inline bool btf_is_decl_tag(const struct btf_type *t)
 {
-	return btf_kind(t) == BTF_KIND_TAG;
+	return btf_kind(t) == BTF_KIND_DECL_TAG;
 }

 static inline __u8 btf_int_encoding(const struct btf_type *t)
@@ -477,10 +504,10 @@ btf_var_secinfos(const struct btf_type *t)
 	return (struct btf_var_secinfo *)(t + 1);
 }

-struct btf_tag;
-static inline struct btf_tag *btf_tag(const struct btf_type *t)
+struct btf_decl_tag;
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
 {
-	return (struct btf_tag *)(t + 1);
+	return (struct btf_decl_tag *)(t + 1);
 }

 #ifdef __cplusplus
@@ -188,7 +188,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,

 static int btf_dump_resize(struct btf_dump *d)
 {
-	int err, last_id = btf__get_nr_types(d->btf);
+	int err, last_id = btf__type_cnt(d->btf) - 1;

 	if (last_id <= d->last_id)
 		return 0;
@@ -262,7 +262,7 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
 {
 	int err, i;

-	if (id > btf__get_nr_types(d->btf))
+	if (id >= btf__type_cnt(d->btf))
 		return libbpf_err(-EINVAL);

 	err = btf_dump_resize(d);
@@ -294,11 +294,11 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
  */
 static int btf_dump_mark_referenced(struct btf_dump *d)
 {
-	int i, j, n = btf__get_nr_types(d->btf);
+	int i, j, n = btf__type_cnt(d->btf);
 	const struct btf_type *t;
 	__u16 vlen;

-	for (i = d->last_id + 1; i <= n; i++) {
+	for (i = d->last_id + 1; i < n; i++) {
 		t = btf__type_by_id(d->btf, i);
 		vlen = btf_vlen(t);

@@ -316,7 +316,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
 	case BTF_KIND_TYPEDEF:
 	case BTF_KIND_FUNC:
 	case BTF_KIND_VAR:
-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		d->type_states[t->type].referenced = 1;
 		break;

@@ -584,7 +584,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
 	case BTF_KIND_FUNC:
 	case BTF_KIND_VAR:
 	case BTF_KIND_DATASEC:
-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		d->type_states[id].order_state = ORDERED;
 		return 0;

@@ -1562,29 +1562,28 @@ static int btf_dump_get_bitfield_value(struct btf_dump *d,
 				       __u64 *value)
 {
 	__u16 left_shift_bits, right_shift_bits;
-	__u8 nr_copy_bits, nr_copy_bytes;
 	const __u8 *bytes = data;
-	int sz = t->size;
+	__u8 nr_copy_bits;
 	__u64 num = 0;
 	int i;

 	/* Maximum supported bitfield size is 64 bits */
-	if (sz > 8) {
-		pr_warn("unexpected bitfield size %d\n", sz);
+	if (t->size > 8) {
+		pr_warn("unexpected bitfield size %d\n", t->size);
 		return -EINVAL;
 	}

 	/* Bitfield value retrieval is done in two steps; first relevant bytes are
 	 * stored in num, then we left/right shift num to eliminate irrelevant bits.
 	 */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	for (i = t->size - 1; i >= 0; i--)
+		num = num * 256 + bytes[i];
 	nr_copy_bits = bit_sz + bits_offset;
-	nr_copy_bytes = t->size;
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-	for (i = nr_copy_bytes - 1; i >= 0; i--)
-		num = num * 256 + bytes[i];
-#elif __BYTE_ORDER == __BIG_ENDIAN
-	for (i = 0; i < nr_copy_bytes; i++)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	for (i = 0; i < t->size; i++)
 		num = num * 256 + bytes[i];
+	nr_copy_bits = t->size * 8 - bits_offset;
 #else
 # error "Unrecognized __BYTE_ORDER__"
 #endif
@@ -1658,9 +1657,15 @@ static int btf_dump_base_type_check_zero(struct btf_dump *d,
 	return 0;
 }

-static bool ptr_is_aligned(const void *data, int data_sz)
+static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
+			   const void *data)
 {
-	return ((uintptr_t)data) % data_sz == 0;
+	int alignment = btf__align_of(btf, type_id);
+
+	if (alignment == 0)
+		return false;
+
+	return ((uintptr_t)data) % alignment == 0;
 }

 static int btf_dump_int_data(struct btf_dump *d,
@@ -1671,9 +1676,10 @@ static int btf_dump_int_data(struct btf_dump *d,
 {
 	__u8 encoding = btf_int_encoding(t);
 	bool sign = encoding & BTF_INT_SIGNED;
+	char buf[16] __attribute__((aligned(16)));
 	int sz = t->size;

-	if (sz == 0) {
+	if (sz == 0 || sz > sizeof(buf)) {
 		pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
 		return -EINVAL;
 	}
@@ -1681,8 +1687,10 @@ static int btf_dump_int_data(struct btf_dump *d,
 	/* handle packed int data - accesses of integers not aligned on
 	 * int boundaries can cause problems on some platforms.
 	 */
-	if (!ptr_is_aligned(data, sz))
-		return btf_dump_bitfield_data(d, t, data, 0, 0);
+	if (!ptr_is_aligned(d->btf, type_id, data)) {
+		memcpy(buf, data, sz);
+		data = buf;
+	}

 	switch (sz) {
 	case 16: {
||||
@ -1692,10 +1700,10 @@ static int btf_dump_int_data(struct btf_dump *d,
|
||||
/* avoid use of __int128 as some 32-bit platforms do not
|
||||
* support it.
|
||||
*/
|
||||
#if __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
lsi = ints[0];
|
||||
msi = ints[1];
|
||||
#elif __BYTE_ORDER == __BIG_ENDIAN
|
||||
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
|
||||
lsi = ints[1];
|
||||
msi = ints[0];
|
||||
#else
|
||||
@ -1768,7 +1776,7 @@ static int btf_dump_float_data(struct btf_dump *d,
|
||||
int sz = t->size;
|
||||
|
||||
/* handle unaligned data; copy to local union */
|
||||
if (!ptr_is_aligned(data, sz)) {
|
||||
if (!ptr_is_aligned(d->btf, type_id, data)) {
|
||||
memcpy(&fl, data, sz);
|
||||
flp = &fl;
|
||||
}
|
||||
@@ -1931,7 +1939,7 @@ static int btf_dump_ptr_data(struct btf_dump *d,
 			      __u32 id,
 			      const void *data)
 {
-	if (ptr_is_aligned(data, d->ptr_sz) && d->ptr_sz == sizeof(void *)) {
+	if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
 		btf_dump_type_values(d, "%p", *(void **)data);
 	} else {
 		union ptr_data pt;
@@ -1951,10 +1959,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
 				   __u32 id,
 				   __s64 *value)
 {
-	int sz = t->size;
-
 	/* handle unaligned enum value */
-	if (!ptr_is_aligned(data, sz)) {
+	if (!ptr_is_aligned(d->btf, id, data)) {
 		__u64 val;
 		int err;

@@ -2217,7 +2223,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
 	case BTF_KIND_FWD:
 	case BTF_KIND_FUNC:
 	case BTF_KIND_FUNC_PROTO:
-	case BTF_KIND_TAG:
+	case BTF_KIND_DECL_TAG:
 		err = btf_dump_unsupported_data(d, t, id);
 		break;
 	case BTF_KIND_INT:
@@ -13,9 +13,12 @@
 #include "hashmap.h"
 #include "bpf_gen_internal.h"
 #include "skel_internal.h"
+#include <asm/byteorder.h>

-#define MAX_USED_MAPS 64
-#define MAX_USED_PROGS 32
+#define MAX_USED_MAPS	64
+#define MAX_USED_PROGS	32
+#define MAX_KFUNC_DESCS 256
+#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)

 /* The following structure describes the stack layout of the loader program.
  * In addition R6 contains the pointer to context.
@@ -30,7 +33,6 @@
  */
 struct loader_stack {
 	__u32 btf_fd;
-	__u32 map_fd[MAX_USED_MAPS];
 	__u32 prog_fd[MAX_USED_PROGS];
 	__u32 inner_map_fd;
 };
@@ -143,13 +145,49 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
 	if (realloc_data_buf(gen, size8))
 		return 0;
 	prev = gen->data_cur;
-	memcpy(gen->data_cur, data, size);
-	gen->data_cur += size;
-	memcpy(gen->data_cur, &zero, size8 - size);
-	gen->data_cur += size8 - size;
+	if (data) {
+		memcpy(gen->data_cur, data, size);
+		memcpy(gen->data_cur + size, &zero, size8 - size);
+	} else {
+		memset(gen->data_cur, 0, size8);
+	}
+	gen->data_cur += size8;
 	return prev - gen->data_start;
 }

+/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
+ * to start of fd_array. Caller can decide if it is usable or not.
+ */
+static int add_map_fd(struct bpf_gen *gen)
+{
+	if (!gen->fd_array)
+		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+	if (gen->nr_maps == MAX_USED_MAPS) {
+		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
+		gen->error = -E2BIG;
+		return 0;
+	}
+	return gen->nr_maps++;
+}
+
+static int add_kfunc_btf_fd(struct bpf_gen *gen)
+{
+	int cur;
+
+	if (!gen->fd_array)
+		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
+		cur = add_data(gen, NULL, sizeof(int));
+		return (cur - gen->fd_array) / sizeof(int);
+	}
+	return MAX_USED_MAPS + gen->nr_fd_array++;
+}
+
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+	return gen->fd_array + index * sizeof(int);
+}
+
 static int insn_bytes_to_bpf_size(__u32 sz)
 {
 	switch (sz) {
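For orientation, a sketch of the fd_array blob layout that add_map_fd() and add_kfunc_btf_fd() carve out above (indices into an array of int):

	/*
	 *   [0 .. MAX_USED_MAPS)               map fds, handed out by add_map_fd()
	 *   [MAX_USED_MAPS ..
	 *    MAX_USED_MAPS + MAX_KFUNC_DESCS)  kfunc BTF fds, add_kfunc_btf_fd()
	 *   beyond that                        extra slots allocated ad hoc once
	 *                                      MAX_KFUNC_DESCS is exhausted
	 */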
@@ -171,14 +209,22 @@ static void emit_rel_store(struct bpf_gen *gen, int off, int data)
 	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
 }

-/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
-static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
+static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
 {
-	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
-	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, blob_off));
+	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
 	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
 					 0, 0, 0, off));
-	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
+{
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, blob_off));
+	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
+	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
 }

 static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
@@ -326,11 +372,11 @@ int bpf_gen__finish(struct bpf_gen *gen)
 			       offsetof(struct bpf_prog_desc, prog_fd), 4,
 			       stack_off(prog_fd[i]));
 	for (i = 0; i < gen->nr_maps; i++)
-		move_stack2ctx(gen,
-			       sizeof(struct bpf_loader_ctx) +
-			       sizeof(struct bpf_map_desc) * i +
-			       offsetof(struct bpf_map_desc, map_fd), 4,
-			       stack_off(map_fd[i]));
+		move_blob2ctx(gen,
+			      sizeof(struct bpf_loader_ctx) +
+			      sizeof(struct bpf_map_desc) * i +
+			      offsetof(struct bpf_map_desc, map_fd), 4,
+			      blob_fd_array_off(gen, i));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
 	emit(gen, BPF_EXIT_INSN());
 	pr_debug("gen: finish %d\n", gen->error);
@@ -386,11 +432,11 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
 }

 void bpf_gen__map_create(struct bpf_gen *gen,
-			 struct bpf_create_map_attr *map_attr, int map_idx)
+			 struct bpf_create_map_params *map_attr, int map_idx)
 {
 	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
 	bool close_inner_map_fd = false;
-	int map_create_attr;
+	int map_create_attr, idx;
 	union bpf_attr attr;

 	memset(&attr, 0, attr_size);
@@ -398,6 +444,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
 	attr.key_size = map_attr->key_size;
 	attr.value_size = map_attr->value_size;
 	attr.map_flags = map_attr->map_flags;
+	attr.map_extra = map_attr->map_extra;
 	memcpy(attr.map_name, map_attr->name,
 	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
 	attr.numa_node = map_attr->numa_node;
@@ -467,9 +514,11 @@ void bpf_gen__map_create(struct bpf_gen *gen,
 		gen->error = -EDOM; /* internal bug */
 		return;
 	} else {
-		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
-				      stack_off(map_fd[map_idx])));
-		gen->nr_maps++;
+		/* add_map_fd does gen->nr_maps++ */
+		idx = add_map_fd(gen);
+		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+						 0, 0, 0, blob_fd_array_off(gen, idx)));
+		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
 	}
 	if (close_inner_map_fd)
 		emit_sys_close_stack(gen, stack_off(inner_map_fd));
@@ -511,8 +560,8 @@ static void emit_find_attach_target(struct bpf_gen *gen)
 	 */
 }

-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
-			    int insn_idx)
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+			    bool is_typeless, int kind, int insn_idx)
 {
 	struct ksym_relo_desc *relo;

@ -524,38 +573,292 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
|
||||
gen->relos = relo;
|
||||
relo += gen->relo_cnt;
|
||||
relo->name = name;
|
||||
relo->is_weak = is_weak;
|
||||
relo->is_typeless = is_typeless;
|
||||
relo->kind = kind;
|
||||
relo->insn_idx = insn_idx;
|
||||
gen->relo_cnt++;
|
||||
}
|
||||

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name, insn, len = strlen(relo->name) + 1;
	struct ksym_desc *kdesc;

	pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
	name = add_data(gen, relo->name, len);
	for (int i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
	emit_check_err(gen);
}
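The generated code leaves the helper's result in R7; a hedged note on the encoding the surrounding relocation code appears to rely on (compare the BPF_RSH by 32 used further down when storing btf_obj_fd):

	/* Assumed result encoding of bpf_btf_find_by_name_kind():
	 *   bits  0..31: BTF type id (negative value means error)
	 *   bits 32..63: module BTF object fd, 0 for vmlinux BTF
	 */
	static void unpack_find_result(__s64 ret, __u32 *btf_id, __u32 *btf_obj_fd)
	{
		*btf_id = (__u32)ret;             /* goes into insn[insn_idx].imm */
		*btf_obj_fd = (__u32)(ret >> 32); /* goes into insn[insn_idx + 1].imm for vars */
	}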

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}
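In pseudo-C, the sequence this function emits into the loader program is roughly the following (a sketch, assuming the bpf_kallsyms_lookup_name() helper signature; res_off points at an 8-byte slot in the loader's data blob):

	__u64 addr;	/* the u64 slot allocated at res_off */
	long ret;

	ret = bpf_kallsyms_lookup_name(name, name_len, 0, &addr);
	/* R9 <- addr, R7 <- ret; the typeless-ksym caller below treats -ENOENT
	 * as tolerable so that weak ksyms may be missing
	 */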

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* set a default value for imm */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	/* skip success case store if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1));
	/* store btf_id into insn[insn_idx].imm */
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
		offsetof(struct bpf_insn, imm);
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, insn));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
	if (relo->kind == BTF_KIND_VAR) {
		/* store btf_obj_fd into insn[insn_idx + 1].imm */
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      sizeof(struct bpf_insn)));
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* skip store of BTF fd if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3));
	/* store BTF fd in slot */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* set a default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip insn->off store if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2));
	/* skip if vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
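The mask exists because dst_reg and src_reg are 4-bit bitfields sharing the single byte right after the opcode, and which nibble holds which field flips with bitfield endianness. For reference, the layout as defined in linux/bpf.h:

	struct bpf_insn {
		__u8	code;		/* opcode */
		__u8	dst_reg:4;	/* dest register */
		__u8	src_reg:4;	/* source register */
		__s16	off;		/* signed offset */
		__s32	imm;		/* signed immediate constant */
	};

AND-ing the byte at offsetofend(struct bpf_insn, code) with this mask therefore keeps dst_reg and zeroes src_reg on either layout, which is exactly what the clear_src_reg path below does.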

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm)));
		/* jump over src_reg adjustment if imm is not 0 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* set default values as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case stores if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

@ -571,14 +874,23 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->relo_cnt; i++) {
		if (gen->relos[i].kind != BTF_KIND_VAR)
			continue;
		/* close fd recorded in insn[insn_idx + 1].imm */
		insn = insns +
			sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
			offsetof(struct bpf_insn, imm);
		emit_sys_close_blob(gen, insn);
	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
@ -637,9 +949,8 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
	emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
			  stack_off(map_fd[0]));
	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
@ -706,8 +1017,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
			blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
@ -725,8 +1036,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
			blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
File diff suppressed because it is too large
@ -150,6 +150,7 @@ struct bpf_object_load_attr {
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
@ -167,7 +168,8 @@ LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp)	\
	for ((pos) = bpf_object__next(NULL),	\
	     (tmp) = bpf_object__next(pos);	\
@ -189,16 +191,22 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,

/* Accessors of bpf_program */
struct bpf_program;
LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
						 const struct bpf_object *obj);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
struct bpf_program *bpf_program__next(struct bpf_program *prog,
				      const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);

#define bpf_object__for_each_program(pos, obj)		\
	for ((pos) = bpf_program__next(NULL, (obj));	\
	     (pos) != NULL;				\
	     (pos) = bpf_program__next((pos), (obj)))
#define bpf_object__for_each_program(pos, obj)			\
	for ((pos) = bpf_object__next_program((obj), NULL);	\
	     (pos) != NULL;					\
	     (pos) = bpf_object__next_program((obj), (pos)))
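A hedged migration sketch for callers of the deprecated iterator (obj is an opened bpf_object; the printf is illustrative):

	struct bpf_program *prog;

	/* pre-0.7 style, now marked deprecated: */
	for (prog = bpf_program__next(NULL, obj); prog; prog = bpf_program__next(prog, obj))
		printf("%s\n", bpf_program__name(prog));

	/* equivalent with the new accessor, which is also what the macro now expands to: */
	bpf_object__for_each_program(prog, obj)
		printf("%s\n", bpf_program__name(prog));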

LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
						 const struct bpf_object *obj);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
struct bpf_program *bpf_program__prev(struct bpf_program *prog,
				      const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);

typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);

@ -217,14 +225,51 @@ LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);

/* returns program size in bytes */
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);

struct bpf_insn;

/**
 * @brief **bpf_program__insns()** gives read-only access to BPF program's
 * underlying BPF instructions.
 * @param prog BPF program for which to return instructions
 * @return a pointer to an array of BPF instructions that belong to the
 * specified BPF program
 *
 * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
 * pointed to can be fetched using **bpf_program__insn_cnt()** API.
 *
 * Keep in mind, libbpf can modify and append/delete BPF program's
 * instructions as it processes BPF object file and prepares everything for
 * uploading into the kernel. So depending on the point in BPF object
 * lifetime, **bpf_program__insns()** can return different sets of
 * instructions. As an example, during BPF object load phase BPF program
 * instructions will be CO-RE-relocated, BPF subprograms instructions will be
 * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
 * returned before **bpf_object__load()** and after it might be quite
 * different.
 */
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);
/**
 * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
 * that form specified BPF program.
 * @param prog BPF program for which to return number of BPF instructions
 *
 * See **bpf_program__insns()** documentation for notes on how libbpf can
 * change instructions and their count during different phases of
 * **bpf_object** lifetime.
 */
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
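A minimal usage sketch of the two new accessors (assuming prog comes from an opened bpf_object; note the caveat above about instructions changing across load):

	const struct bpf_insn *insns = bpf_program__insns(prog);
	size_t i, cnt = bpf_program__insn_cnt(prog);

	for (i = 0; i < cnt; i++)
		printf("insn %zu: code 0x%02x imm %d\n", i, insns[i].code, insns[i].imm);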

LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
				 __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
					 const char *path,
					 int instance);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
					   const char *path,
					   int instance);
@ -358,8 +403,6 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

struct bpf_insn;

/*
 * Libbpf allows callers to adjust BPF programs before being loaded
 * into kernel. One program in an object file can be transformed into
@ -388,7 +431,7 @@ struct bpf_insn;
 * one instance. In this case bpf_program__fd(prog) is equal to
 * bpf_program__nth_fd(prog, 0).
 */

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
struct bpf_prog_prep_result {
	/*
	 * If not NULL, load new instruction array.
@ -417,9 +460,11 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
				  struct bpf_insn *insns, int insns_cnt,
				  struct bpf_prog_prep_result *res);

LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
				     bpf_program_prep_t prep);

LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);

/*
@ -502,16 +547,21 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);

#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_map__next(NULL, (obj));	\
	for ((pos) = bpf_object__next_map((obj), NULL);	\
	     (pos) != NULL;				\
	     (pos) = bpf_map__next((pos), (obj)))
	     (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);

/**
 * @brief **bpf_map__fd()** gets the file descriptor of the passed
@ -550,6 +600,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map if_index */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
/* get/set map map_extra flags */
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
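A hedged sketch of the intended use with the new bloom filter map type from this series, where the low 4 bits of map_extra select the number of hash functions (0 picks the default of 5); the map name is illustrative and this must happen before the object is loaded:

	struct bpf_map *bloom = bpf_object__find_map_by_name(obj, "bloom");

	if (bpf_map__set_map_extra(bloom, 3))	/* use 3 hash functions */
		fprintf(stderr, "setting map_extra failed\n");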

typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,

@ -389,5 +389,16 @@ LIBBPF_0.5.0 {

LIBBPF_0.6.0 {
	global:
		btf__add_tag;
		bpf_map__map_extra;
		bpf_map__set_map_extra;
		bpf_object__next_map;
		bpf_object__next_program;
		bpf_object__prev_map;
		bpf_object__prev_program;
		bpf_program__insn_cnt;
		bpf_program__insns;
		btf__add_btf;
		btf__add_decl_tag;
		btf__raw_data;
		btf__type_cnt;
} LIBBPF_0.5.0;

@ -13,6 +13,8 @@
#include <limits.h>
#include <errno.h>
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
#include "libbpf_legacy.h"
#include "relo_core.h"

@ -52,8 +54,8 @@
#endif

/* Older libelf all end up in this expression, for both 32 and 64 bit */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#endif

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@ -69,8 +71,8 @@
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_TAG_ENC(value, type, component_idx) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx)
#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
	BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)

#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@ -193,8 +195,9 @@ enum map_def_parts {
	MAP_DEF_NUMA_NODE = 0x080,
	MAP_DEF_PINNING = 0x100,
	MAP_DEF_INNER_MAP = 0x200,
	MAP_DEF_MAP_EXTRA = 0x400,

	MAP_DEF_ALL = 0x3ff, /* combination of all above */
	MAP_DEF_ALL = 0x7ff, /* combination of all above */
};

struct btf_map_def {
@ -208,6 +211,7 @@ struct btf_map_def {
	__u32 map_flags;
	__u32 numa_node;
	__u32 pinning;
	__u64 map_extra;
};

int parse_btf_map_def(const char *map_name, struct btf *btf,
@ -298,14 +302,32 @@ struct bpf_prog_load_params {
	__u32 log_level;
	char *log_buf;
	size_t log_buf_sz;
	int *fd_array;
};

int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off);
struct bpf_create_map_params {
	const char *name;
	enum bpf_map_type map_type;
	__u32 map_flags;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 numa_node;
	__u32 btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 map_ifindex;
	union {
		__u32 inner_map_fd;
		__u32 btf_vmlinux_value_type_id;
	};
	__u64 map_extra;
};

int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr);

struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind);
@ -408,6 +430,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind);

extern enum libbpf_strict_mode libbpf_mode;

@ -469,4 +493,26 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
 * Takes ownership of the fd passed in, and closes it if calling
 * fcntl(fd, F_DUPFD_CLOEXEC, 3).
 */
static inline int ensure_good_fd(int fd)
{
	int old_fd = fd, saved_errno;

	if (fd < 0)
		return fd;
	if (fd < 3) {
		fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
		saved_errno = errno;
		close(old_fd);
		if (fd < 0) {
			pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
			errno = saved_errno;
		}
	}
	return fd;
}

#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
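A usage sketch for ensure_good_fd() at a hypothetical call site. The apparent motivation: if the process started with stdio descriptors closed, open() can legally return 0-2, and a later internal close() of such an fd would silently break the caller's stdio, so internal fds get bumped above 2:

	int fd;

	fd = ensure_good_fd(open("/sys/kernel/btf/vmlinux", O_RDONLY | O_CLOEXEC));
	if (fd < 0)
		return -errno;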
@ -52,8 +52,17 @@ enum libbpf_strict_mode {
	 * allowed, with LIBBPF_STRICT_SEC_PREFIX this will become
	 * unrecognized by libbpf and would have to be just SEC("xdp") and
	 * SEC("xdp") and SEC("perf_event").
	 *
	 * Note, in this mode the program pin path will be based on the
	 * function name instead of section name.
	 */
	LIBBPF_STRICT_SEC_NAME = 0x04,
	/*
	 * Disable the global 'bpf_objects_list'. Maintaining this list adds
	 * a race condition to bpf_object__open() and bpf_object__close().
	 * Clients can maintain it on their own if it is valuable for them.
	 */
	LIBBPF_STRICT_NO_OBJECT_LIST = 0x08,

	__LIBBPF_STRICT_LAST,
};
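A hedged usage sketch for opting into the new strictness flags from application code (libbpf_set_strict_mode() is declared in this same header; the flags OR together as a bitmask):

	#include <bpf/libbpf_legacy.h>

	if (libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME | LIBBPF_STRICT_NO_OBJECT_LIST))
		fprintf(stderr, "failed to enable strict mode\n");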

@ -33,7 +33,7 @@ static int get_vendor_id(int ifindex)

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

@ -15,7 +15,6 @@
#include <linux/btf.h>
#include <elf.h>
#include <libelf.h>
#include <gelf.h>
#include <fcntl.h>
#include "libbpf.h"
#include "btf.h"
@ -302,7 +301,7 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
	if (!linker->filename)
		return -ENOMEM;

	linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
	if (linker->fd < 0) {
		err = -errno;
		pr_warn("failed to create '%s': %d\n", file, err);
@ -324,12 +323,12 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)

	linker->elf_hdr->e_machine = EM_BPF;
	linker->elf_hdr->e_type = ET_REL;
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER"
#error "Unknown __BYTE_ORDER__"
#endif

	/* STRTAB */
@ -539,12 +538,12 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
				const struct bpf_linker_file_opts *opts,
				struct src_obj *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	const int host_endianness = ELFDATA2LSB;
#elif __BYTE_ORDER == __BIG_ENDIAN
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	const int host_endianness = ELFDATA2MSB;
#else
#error "Unknown __BYTE_ORDER"
#error "Unknown __BYTE_ORDER__"
#endif
	int err = 0;
	Elf_Scn *scn;
@ -557,7 +556,7 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,

	obj->filename = filename;

	obj->fd = open(filename, O_RDONLY);
	obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
	if (obj->fd < 0) {
		err = -errno;
		pr_warn("failed to open file '%s': %d\n", filename, err);
@ -921,7 +920,7 @@ static int check_btf_type_id(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (*type_id > btf__get_nr_types(btf))
	if (*type_id >= btf__type_cnt(btf))
		return -EINVAL;

	return 0;
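The loop-bound rewrites in this and the following hunks all stem from one semantic change: btf__get_nr_types() excluded the implicit "void" type id 0 (valid ids 1..n inclusive), while the replacement btf__type_cnt() counts every type including id 0 (valid ids 1..n-1, with an empty BTF reporting 1). The resulting idiom, sketched:

	__u32 i, n = btf__type_cnt(btf);

	for (i = 1; i < n; i++) {	/* was: for (i = 1; i <= btf__get_nr_types(btf); i++) */
		const struct btf_type *t = btf__type_by_id(btf, i);
		/* ... */
	}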
@ -948,8 +947,8 @@ static int linker_sanity_check_btf(struct src_obj *obj)
	if (!obj->btf)
		return 0;

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		t = btf_type_by_id(obj->btf, i);

		err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
@ -1659,8 +1658,8 @@ static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sy
		return -EINVAL;
	}

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(obj->btf, i);

		/* some global and extern FUNCs and VARs might not be associated with any
@ -2131,8 +2130,8 @@ static int linker_fixup_btf(struct src_obj *obj)
	if (!obj->btf)
		return 0;

	n = btf__get_nr_types(obj->btf);
	for (i = 1; i <= n; i++) {
	n = btf__type_cnt(obj->btf);
	for (i = 1; i < n; i++) {
		struct btf_var_secinfo *vi;
		struct btf_type *t;

@ -2235,14 +2234,14 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
	if (!obj->btf)
		return 0;

	start_id = btf__get_nr_types(linker->btf) + 1;
	n = btf__get_nr_types(obj->btf);
	start_id = btf__type_cnt(linker->btf);
	n = btf__type_cnt(obj->btf);

	obj->btf_type_map = calloc(n + 1, sizeof(int));
	if (!obj->btf_type_map)
		return -ENOMEM;

	for (i = 1; i <= n; i++) {
	for (i = 1; i < n; i++) {
		struct glob_sym *glob_sym = NULL;

		t = btf__type_by_id(obj->btf, i);
@ -2297,8 +2296,8 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
	}

	/* remap all the types except DATASECs */
	n = btf__get_nr_types(linker->btf);
	for (i = start_id; i <= n; i++) {
	n = btf__type_cnt(linker->btf);
	for (i = start_id; i < n; i++) {
		struct btf_type *dst_t = btf_type_by_id(linker->btf, i);

		if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
@ -2657,7 +2656,7 @@ static int finalize_btf(struct bpf_linker *linker)
	__u32 raw_sz;

	/* bail out if no BTF data was produced */
	if (btf__get_nr_types(linker->btf) == 0)
	if (btf__type_cnt(linker->btf) == 1)
		return 0;

	for (i = 1; i < linker->sec_cnt; i++) {
@ -2694,7 +2693,7 @@ static int finalize_btf(struct bpf_linker *linker)
	}

	/* Emit .BTF section */
	raw_data = btf__get_raw_data(linker->btf, &raw_sz);
	raw_data = btf__raw_data(linker->btf, &raw_sz);
	if (!raw_data)
		return -ENOMEM;

Some files were not shown because too many files have changed in this diff