mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
ff9e1632d6
During 6.4 development it became clear that the one-shot list used by the user_event_mm's next field was confusing to others. It is not clear how this list is protected or what the next field usage is for unless you are familiar with the code.

Add comments into the user_event_mm struct indicating lock requirement and usage. Also document how and why this approach was used via comments in both user_event_enabler_update() and user_event_mm_get_all() and the rules to properly use it.

Link: https://lkml.kernel.org/r/20230519230741.669-5-beaub@linux.microsoft.com
Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wicngggxVpbnrYHjRTwGE0WYscPRM+L2HO2BF8ia1EXgQ@mail.gmail.com/
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Beau Belgrave <beaub@linux.microsoft.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
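The "one-shot list" the commit message documents is an intrusive, singly linked chain threaded through the objects' own next field: while holding a lock (event_mutex in the kernel code), a producer links the objects of interest into a temporary chain, and a consumer then walks that chain exactly once. What follows is a minimal userspace sketch of that pattern only, not the kernel implementation; the struct node, gather_one_shot() and consume_one_shot() names are illustrative and do not appear in the kernel sources.

/* Minimal userspace model of a one-shot intrusive list (illustrative only). */
#include <pthread.h>
#include <stdio.h>

struct node {
	int id;
	struct node *next;	/* only valid while a one-shot list is live */
};

/* Stand-in for the kernel's event_mutex. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer: link every element of @pool into a temporary chain under the lock. */
static struct node *gather_one_shot(struct node *pool, int count)
{
	struct node *head = NULL;
	int i;

	pthread_mutex_lock(&list_lock);

	for (i = 0; i < count; i++) {
		pool[i].next = head;
		head = &pool[i];
	}

	pthread_mutex_unlock(&list_lock);

	return head;
}

/* Consumer: walk the chain exactly once, clearing next for later reuse. */
static void consume_one_shot(struct node *head)
{
	while (head) {
		struct node *next = head->next;

		printf("processing node %d\n", head->id);
		head->next = NULL;
		head = next;
	}
}

int main(void)
{
	struct node pool[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	consume_one_shot(gather_one_shot(pool, 3));
	return 0;
}

The attraction of the pattern is that no separate allocation is needed to snapshot a set of objects, at the cost of the usage rules the commit message refers to. Per that message, the two sides of the pattern are documented in user_event_mm_get_all() and user_event_enabler_update(); the header below only records the locking rule for the next field itself.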
85 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#ifndef _LINUX_USER_EVENTS_H
#define _LINUX_USER_EVENTS_H

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/mm_types.h>
#include <linux/workqueue.h>
#include <uapi/linux/user_events.h>

#ifdef CONFIG_USER_EVENTS
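/*
 * Per-mm user_events state: ties an mm_struct to its list of enablers.
 * Lifetime is tracked by refcnt; tasks counts the tasks sharing this mm.
 */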
struct user_event_mm {
	struct list_head	mms_link;
	struct list_head	enablers;
	struct mm_struct	*mm;
	/* Used for one-shot lists, protected by event_mutex */
	struct user_event_mm	*next;
	refcount_t		refcnt;
	refcount_t		tasks;
	struct rcu_work		put_rwork;
};

extern void user_event_mm_dup(struct task_struct *t,
			      struct user_event_mm *old_mm);

extern void user_event_mm_remove(struct task_struct *t);

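/*
 * Hook for fork: a child created with CLONE_VM shares the parent's
 * user_event_mm and only bumps its task count, while a full fork gets
 * an independent copy via user_event_mm_dup().
 */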
static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
	struct user_event_mm *old_mm;

	if (!t || !current->user_event_mm)
		return;

	old_mm = current->user_event_mm;

	if (clone_flags & CLONE_VM) {
		t->user_event_mm = old_mm;
		refcount_inc(&old_mm->tasks);
		return;
	}

	user_event_mm_dup(t, old_mm);
}

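/*
 * Both execve and exit detach the task from its user_event_mm through
 * user_event_mm_remove().
 */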
static inline void user_events_execve(struct task_struct *t)
{
	if (!t || !t->user_event_mm)
		return;

	user_event_mm_remove(t);
}

static inline void user_events_exit(struct task_struct *t)
{
	if (!t || !t->user_event_mm)
		return;

	user_event_mm_remove(t);
}
#else
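/*
 * CONFIG_USER_EVENTS is not set: provide empty stubs so the fork,
 * execve and exit paths can call these hooks unconditionally.
 */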
static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
}

static inline void user_events_execve(struct task_struct *t)
{
}

static inline void user_events_exit(struct task_struct *t)
{
}
#endif /* CONFIG_USER_EVENTS */

#endif /* _LINUX_USER_EVENTS_H */