mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-06 13:16:22 +00:00
75839101ce
When we attach a sleepable hook to hid_hw_raw_requests, we can (and in many cases should) ourselves call hid_bpf_raw_request(), to actually fetch data from the device itself. However, this means that we might enter an infinite loop between hid_hw_raw_requests hooks and hid_bpf_hw_request() calls. To prevent that, if a hid_bpf_hw_request() call is emitted, we prevent any new call of this kfunc by storing the information in the context. This way we can always trace/monitor/filter the incoming bpf requests, while preventing those loops from happening. I don't think exposing "from_bpf" is very interesting because while writing such a bpf program, you need to match at least the report number and/or the source of the call. So a blind "if there is a hid_hw_raw_request() call, I'm emitting another one" makes no real sense. Link: https://patch.msgid.link/20240626-hid_hw_req_bpf-v2-5-cfd60fb6c79f@kernel.org Acked-by: Jiri Kosina <jkosina@suse.com> Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
23 lines
509 B
C
23 lines
509 B
C
/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * Internal interfaces shared between the HID-BPF dispatch core and the
 * rest of the HID-BPF implementation. Not part of the UAPI.
 */

#ifndef _BPF_HID_BPF_DISPATCH_H
#define _BPF_HID_BPF_DISPATCH_H

#include <linux/hid.h>

/*
 * struct hid_bpf_ctx_kern - kernel-private wrapper around the context
 * handed to HID-BPF programs.
 * @ctx: the public struct hid_bpf_ctx visible to BPF programs
 * @data: backing buffer for the report data reached through @ctx
 * @from_bpf: set once a hid_bpf_hw_request() call has been emitted from a
 *            BPF program, so any further call of that kfunc on the same
 *            context is refused; this breaks the potential infinite loop
 *            between hid_hw_raw_request hooks and hid_bpf_hw_request()
 */
struct hid_bpf_ctx_kern {
	struct hid_bpf_ctx ctx;
	u8 *data;
	bool from_bpf;
};

/*
 * NOTE(review): the comments on the prototypes below are inferred from the
 * get/put naming convention and the function names only — the definitions
 * are not visible in this header; confirm against hid_bpf_dispatch.c.
 */

/* Look up the hid_device matching @hid_id; presumably takes a reference
 * that must be released with hid_put_device(). */
struct hid_device *hid_get_device(unsigned int hid_id);

/* Release the reference obtained via hid_get_device(). */
void hid_put_device(struct hid_device *hid);

/* Presumably allocates the per-device data used to forward events to
 * attached BPF programs. */
int hid_bpf_allocate_event_data(struct hid_device *hdev);

/* Presumably tears down the HID-BPF struct_ops state bound to @hdev. */
void __hid_bpf_ops_destroy_device(struct hid_device *hdev);

/* Presumably triggers a device reconnect so that newly attached or
 * detached BPF programs take effect. */
int hid_bpf_reconnect(struct hid_device *hdev);

/* Forward declaration only: a pointer to struct bpf_prog suffices here. */
struct bpf_prog;

#endif