mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-10 15:58:47 +00:00
selftests/bpf: Attempt to build BPF programs with -Wsign-compare
GCC's -Wall includes -Wsign-compare while clang does not. Since BPF programs are built with clang we need to add this flag explicitly to catch problematic comparisons like: int i = -1; unsigned int j = 1; if (i < j) // this is false. long i = -1; unsigned int j = 1; if (i < j) // this is true. C standard for reference: - If either operand is unsigned long the other shall be converted to unsigned long. - Otherwise, if one operand is a long int and the other unsigned int, then if a long int can represent all the values of an unsigned int, the unsigned int shall be converted to a long int; otherwise both operands shall be converted to unsigned long int. - Otherwise, if either operand is long, the other shall be converted to long. - Otherwise, if either operand is unsigned, the other shall be converted to unsigned. Unfortunately clang's -Wsign-compare is very noisy. It complains about (s32)a == (u32)b which is safe and doesn't have surprising behavior. This patch fixes some of the issues. It needs a follow up to fix the rest. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Jiri Olsa <jolsa@kernel.org> Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/bpf/20231226191148.48536-2-alexei.starovoitov@gmail.com
This commit is contained in:
parent
a640de4cf9
commit
495d2d8133
@ -383,6 +383,7 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
|
||||
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
|
||||
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
|
||||
-I$(abspath $(OUTPUT)/../usr/include)
|
||||
# TODO: enable me -Wsign-compare
|
||||
|
||||
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
|
||||
-Wno-compare-distinct-pointer-types
|
||||
|
@ -20,7 +20,7 @@ struct {
|
||||
} hashmap1 SEC(".maps");
|
||||
|
||||
/* will set before prog run */
|
||||
volatile const __u32 num_cpus = 0;
|
||||
volatile const __s32 num_cpus = 0;
|
||||
|
||||
/* will collect results during prog run */
|
||||
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
|
||||
|
@ -35,7 +35,7 @@ SEC("iter/task_vma") int proc_maps(struct bpf_iter__task_vma *ctx)
|
||||
return 0;
|
||||
|
||||
file = vma->vm_file;
|
||||
if (task->tgid != pid) {
|
||||
if (task->tgid != (pid_t)pid) {
|
||||
if (one_task)
|
||||
one_task_error = 1;
|
||||
return 0;
|
||||
|
@ -22,7 +22,7 @@ int dump_task(struct bpf_iter__task *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (task->pid != tid)
|
||||
if (task->pid != (pid_t)tid)
|
||||
num_unknown_tid++;
|
||||
else
|
||||
num_known_tid++;
|
||||
|
@ -45,7 +45,7 @@ int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
|
||||
}
|
||||
|
||||
/* fill seq_file buffer */
|
||||
for (i = 0; i < print_len; i++)
|
||||
for (i = 0; i < (int)print_len; i++)
|
||||
bpf_seq_write(seq, &seq_num, sizeof(seq_num));
|
||||
|
||||
return ret;
|
||||
|
@ -11,7 +11,7 @@
|
||||
__u32 invocations = 0;
|
||||
__u32 assertion_error = 0;
|
||||
__u32 retval_value = 0;
|
||||
__u32 page_size = 0;
|
||||
__s32 page_size = 0;
|
||||
|
||||
SEC("cgroup/setsockopt")
|
||||
int get_retval(struct bpf_sockopt *ctx)
|
||||
|
@ -15,7 +15,7 @@ struct {
|
||||
__type(value, long);
|
||||
} map_a SEC(".maps");
|
||||
|
||||
__u32 target_pid;
|
||||
__s32 target_pid;
|
||||
__u64 cgroup_id;
|
||||
int target_hid;
|
||||
bool is_cgroup1;
|
||||
|
@ -332,7 +332,7 @@ SEC("tp_btf/task_newtask")
|
||||
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
|
||||
{
|
||||
struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
|
||||
u32 cpu;
|
||||
int cpu;
|
||||
|
||||
if (!is_test_task())
|
||||
return 0;
|
||||
|
@ -6,7 +6,7 @@
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
|
||||
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
|
||||
#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
|
||||
|
||||
static volatile int zero = 0;
|
||||
|
||||
@ -676,7 +676,7 @@ static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
|
||||
|
||||
while ((t = bpf_iter_num_next(it))) {
|
||||
i = *t;
|
||||
if (i >= n)
|
||||
if ((__u32)i >= n)
|
||||
break;
|
||||
sum += arr[i];
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
#include "bpf_misc.h"
|
||||
|
||||
/* weak and shared between two files */
|
||||
const volatile int my_tid __weak;
|
||||
const volatile __u32 my_tid __weak;
|
||||
long syscall_id __weak;
|
||||
|
||||
int output_val1;
|
||||
|
@ -68,7 +68,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id)
|
||||
{
|
||||
static volatile int whatever;
|
||||
|
||||
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
|
||||
if (my_tid != (s32)bpf_get_current_pid_tgid() || id != syscall_id)
|
||||
return 0;
|
||||
|
||||
/* make sure we have CO-RE relocations in main program */
|
||||
|
@ -6,7 +6,7 @@
|
||||
#include "bpf_experimental.h"
|
||||
|
||||
#ifndef ARRAY_SIZE
|
||||
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
|
||||
#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
|
||||
#endif
|
||||
|
||||
#include "linked_list.h"
|
||||
|
@ -13,7 +13,7 @@ char _license[] SEC("license") = "GPL";
|
||||
|
||||
#define DUMMY_STORAGE_VALUE 0xdeadbeef
|
||||
|
||||
int monitored_pid = 0;
|
||||
__u32 monitored_pid = 0;
|
||||
int inode_storage_result = -1;
|
||||
int sk_storage_result = -1;
|
||||
int task_storage_result = -1;
|
||||
|
@ -92,7 +92,7 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
__u32 pid = bpf_get_current_pid_tgid() >> 32;
|
||||
__s32 pid = bpf_get_current_pid_tgid() >> 32;
|
||||
int is_stack = 0;
|
||||
|
||||
is_stack = (vma->vm_start <= vma->vm_mm->start_stack &&
|
||||
|
@ -36,7 +36,7 @@ int add_to_list_in_array(void *ctx)
|
||||
struct node_data *new;
|
||||
int zero = 0;
|
||||
|
||||
if (done || (u32)bpf_get_current_pid_tgid() != pid)
|
||||
if (done || (int)bpf_get_current_pid_tgid() != pid)
|
||||
return 0;
|
||||
|
||||
value = bpf_map_lookup_elem(&array, &zero);
|
||||
|
@ -132,7 +132,7 @@ struct {
|
||||
} disallowed_exec_inodes SEC(".maps");
|
||||
|
||||
#ifndef ARRAY_SIZE
|
||||
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
|
||||
#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0]))
|
||||
#endif
|
||||
|
||||
static INLINE bool IS_ERR(const void* ptr)
|
||||
@ -645,7 +645,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
|
||||
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
|
||||
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
|
||||
|
||||
if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
|
||||
if (past_kill_data != NULL && past_kill_data->kill_target_pid == (pid_t)tpid) {
|
||||
bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
|
||||
past_kill_data);
|
||||
void* payload = kill_data->payload;
|
||||
|
@ -9,7 +9,7 @@ char _license[] SEC("license") = "GPL";
|
||||
#define CUSTOM_INHERIT2 1
|
||||
#define CUSTOM_LISTENER 2
|
||||
|
||||
__u32 page_size = 0;
|
||||
__s32 page_size = 0;
|
||||
|
||||
struct sockopt_inherit {
|
||||
__u8 val;
|
||||
|
@ -5,7 +5,7 @@
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
__u32 page_size = 0;
|
||||
__s32 page_size = 0;
|
||||
|
||||
SEC("cgroup/getsockopt")
|
||||
int _getsockopt_child(struct bpf_sockopt *ctx)
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
||||
|
||||
__u32 page_size = 0;
|
||||
__s32 page_size = 0;
|
||||
|
||||
SEC("cgroup/setsockopt")
|
||||
int sockopt_qos_to_cc(struct bpf_sockopt *ctx)
|
||||
|
@ -21,7 +21,7 @@ const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 204
|
||||
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
|
||||
|
||||
int err = 0;
|
||||
int pid = 0;
|
||||
u32 pid = 0;
|
||||
|
||||
#define DEFINE_ARRAY_WITH_KPTR(_size) \
|
||||
struct bin_data_##_size { \
|
||||
|
@ -53,7 +53,7 @@ int test_core_kernel(void *ctx)
|
||||
struct task_struct *task = (void *)bpf_get_current_task();
|
||||
struct core_reloc_kernel_output *out = (void *)&data.out;
|
||||
uint64_t pid_tgid = bpf_get_current_pid_tgid();
|
||||
uint32_t real_tgid = (uint32_t)pid_tgid;
|
||||
int32_t real_tgid = (int32_t)pid_tgid;
|
||||
int pid, tgid;
|
||||
|
||||
if (data.my_pid_tgid != pid_tgid)
|
||||
|
@ -43,8 +43,8 @@ int BPF_PROG(test_core_module_probed,
|
||||
#if __has_builtin(__builtin_preserve_enum_value)
|
||||
struct core_reloc_module_output *out = (void *)&data.out;
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u32 real_tgid = (__u32)(pid_tgid >> 32);
|
||||
__u32 real_pid = (__u32)pid_tgid;
|
||||
__s32 real_tgid = (__s32)(pid_tgid >> 32);
|
||||
__s32 real_pid = (__s32)pid_tgid;
|
||||
|
||||
if (data.my_pid_tgid != pid_tgid)
|
||||
return 0;
|
||||
@ -77,8 +77,8 @@ int BPF_PROG(test_core_module_direct,
|
||||
#if __has_builtin(__builtin_preserve_enum_value)
|
||||
struct core_reloc_module_output *out = (void *)&data.out;
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u32 real_tgid = (__u32)(pid_tgid >> 32);
|
||||
__u32 real_pid = (__u32)pid_tgid;
|
||||
__s32 real_tgid = (__s32)(pid_tgid >> 32);
|
||||
__s32 real_pid = (__s32)pid_tgid;
|
||||
|
||||
if (data.my_pid_tgid != pid_tgid)
|
||||
return 0;
|
||||
|
@ -38,7 +38,7 @@ int BPF_PROG(test_file_open, struct file *f)
|
||||
return 0;
|
||||
got_fsverity = 1;
|
||||
|
||||
for (i = 0; i < sizeof(digest); i++) {
|
||||
for (i = 0; i < (int)sizeof(digest); i++) {
|
||||
if (digest[i] != expected_digest[i])
|
||||
return 0;
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ int BPF_PROG(unix_listen, struct socket *sock, int backlog)
|
||||
len = unix_sk->addr->len - sizeof(short);
|
||||
path[0] = '@';
|
||||
for (i = 1; i < len; i++) {
|
||||
if (i >= sizeof(struct sockaddr_un))
|
||||
if (i >= (int)sizeof(struct sockaddr_un))
|
||||
break;
|
||||
|
||||
path[i] = unix_sk->addr->name->sun_path[i];
|
||||
|
@ -38,7 +38,7 @@ int xdp_redirect(struct xdp_md *xdp)
|
||||
if (payload + 1 > data_end)
|
||||
return XDP_ABORTED;
|
||||
|
||||
if (xdp->ingress_ifindex != ifindex_in)
|
||||
if (xdp->ingress_ifindex != (__u32)ifindex_in)
|
||||
return XDP_ABORTED;
|
||||
|
||||
if (metadata + 1 > data)
|
||||
|
Loading…
x
Reference in New Issue
Block a user