linux/mm/mmap_lock.c
Vlastimil Babka 9b5c87d479 mm: mmap_lock: check trace_mmap_lock_$type_enabled() instead of regcount
Since commit 7d6be67cfd ("mm: mmap_lock: replace get_memcg_path_buf() with
on-stack buffer"), trace_mmap_lock_reg()/unreg() are used only to maintain
an atomic reg_refcount, which is checked to avoid calling
get_mm_memcg_path() when none of the tracepoints that use it is enabled.

The same can be achieved directly by putting all the work needed for the
tracepoint behind a trace_mmap_lock_##type##_enabled() check, as suggested
by Documentation/trace/tracepoints.rst, with the following advantages:

- uses the tracepoint's static key instead of reading the refcount and
  evaluating a branch

- the check is tracepoint-specific, not shared by all of them

- we can get rid of trace_mmap_lock_reg()/unreg() completely

Thus use the trace_..._enabled() check and remove unnecessary code.
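
For illustration, the check moves from one counter shared by all the
tracepoints into the event macro itself, behind the per-tracepoint static
key. Roughly (the "before" side is an approximate reconstruction, not the
removed code verbatim):

    /* before: reg_refcount is bumped by trace_mmap_lock_reg()/unreg() on
     * any probe registration and is the same for all three tracepoints */
    if (!atomic_read(&reg_refcount))
            return;         /* skip building the memcg path */

    /* after: per-tracepoint static key; when the tracepoint is disabled,
     * no buffer is set up and get_mm_memcg_path() is never called */
    if (trace_mmap_lock_##type##_enabled()) {
            char buf[MEMCG_PATH_BUF_SIZE];
            get_mm_memcg_path(mm, buf, sizeof(buf));
            trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__);
    }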

Link: https://lkml.kernel.org/r/20241105113456.95066-2-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2024-11-11 17:22:28 -08:00

// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>

#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>

EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);

#ifdef CONFIG_MEMCG

/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
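
/*
 * Look up the memcg path only when the tracepoint is actually enabled:
 * trace_mmap_lock_##type##_enabled() resolves to the tracepoint's static
 * key, so in the disabled case the on-stack buffer is never set up and
 * get_mm_memcg_path() is not called.
 */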
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)			\
	do {							\
		if (trace_mmap_lock_##type##_enabled()) {	\
			char buf[MEMCG_PATH_BUF_SIZE];		\
			get_mm_memcg_path(mm, buf, sizeof(buf));	\
			trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \
		}						\
	} while (0)

#else /* !CONFIG_MEMCG */

#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */

#ifdef CONFIG_TRACING
#ifdef CONFIG_MEMCG
/*
 * Write the given mm_struct's memcg path to a buffer. If the path cannot be
 * determined, empty string is written.
 */
static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
{
	struct mem_cgroup *memcg;

	buf[0] = '\0';
	memcg = get_mem_cgroup_from_mm(mm);
	if (memcg == NULL)
		return;
	if (memcg->css.cgroup)
		cgroup_path(memcg->css.cgroup, buf, buflen);
	css_put(&memcg->css);
}
#endif /* CONFIG_MEMCG */

/*
 * Trace calls must be in a separate file, as otherwise there's a circular
 * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
 */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);

void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);

void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
#endif /* CONFIG_TRACING */
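
For context, a rough sketch of the caller side that the circular-dependency
comment above refers to: include/linux/mmap_lock.h forward-declares the
__mmap_lock_do_trace_*() helpers and calls them only when the corresponding
tracepoint is enabled. This is approximate, not the verbatim header:

    /* sketch: relies on DECLARE_TRACEPOINT()/tracepoint_enabled() from
     * <linux/tracepoint-defs.h>; names mirror include/linux/mmap_lock.h */
    DECLARE_TRACEPOINT(mmap_lock_start_locking);

    void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);

    static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                       bool write)
    {
            if (tracepoint_enabled(mmap_lock_start_locking))
                    __mmap_lock_do_trace_start_locking(mm, write);
    }

This way the header never needs to include trace/events/mmap_lock.h, and the
static-key check happens before the out-of-line call into this file is made.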