kasan: make kasan_record_aux_stack_noalloc() the default behaviour

kasan_record_aux_stack_noalloc() was introduced to record a stack trace
without allocating memory in the process.  It has been added to callers
which were invoked while a raw_spinlock_t was held.  More and more callers
were identified and changed over time.  Is it a good thing to have this
while functions try their best to do a lockless setup?  The only
downside of having kasan_record_aux_stack() not allocate any memory is
that we end up without a stacktrace if stackdepot runs out of memory and
the same stacktrace was not recorded earlier.  To quote Marco Elver from
https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP18u7PP_uoZD8YxwZd4jtbof9nVQ@mail.gmail.com/

| I'd be in favor, it simplifies things. And stack depot should be
| able to replenish its pool sufficiently in the "non-aux" cases
| i.e. regular allocations. Worst case we fail to record some
| aux stacks, but I think that's only really bad if there's a bug
| around one of these allocations. In general the probabilities
| of this being a regression are extremely small [...]

Make the kasan_record_aux_stack_noalloc() behaviour the default
behaviour of kasan_record_aux_stack().

[bigeasy@linutronix.de: dressed the diff as patch]
Link: https://lkml.kernel.org/r/20241122155451.Mb2pmeyJ@linutronix.de
Fixes: 7cb3007ce2da ("kasan: generic: introduce kasan_record_aux_stack_noalloc()")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: <kasan-dev@googlegroups.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: syzkaller-bugs@googlegroups.com
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zqiang <qiang.zhang1211@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Peter Zijlstra 2024-11-22 16:54:51 +01:00 committed by Andrew Morton
parent 67b5aec6b3
commit f8db55561f
10 changed files with 14 additions and 37 deletions

View File

@ -491,7 +491,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr); void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */ #else /* CONFIG_KASAN_GENERIC */
@ -509,7 +508,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {} static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */ #endif /* CONFIG_KASAN_GENERIC */

View File

@ -19,9 +19,6 @@ enum task_work_notify_mode {
TWA_SIGNAL, TWA_SIGNAL,
TWA_SIGNAL_NO_IPI, TWA_SIGNAL_NO_IPI,
TWA_NMI_CURRENT, TWA_NMI_CURRENT,
TWA_FLAGS = 0xff00,
TWAF_NO_ALLOC = 0x0100,
}; };
static inline bool task_work_pending(struct task_struct *task) static inline bool task_work_pending(struct task_struct *task)

View File

@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work)) if (!irq_work_claim(work))
return false; return false;
kasan_record_aux_stack_noalloc(work); kasan_record_aux_stack(work);
preempt_disable(); preempt_disable();
if (cpu != smp_processor_id()) { if (cpu != smp_processor_id()) {

View File

@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
void kvfree_call_rcu(struct rcu_head *head, void *ptr) void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{ {
if (head) if (head)
kasan_record_aux_stack_noalloc(ptr); kasan_record_aux_stack(ptr);
__kvfree_call_rcu(head, ptr); __kvfree_call_rcu(head, ptr);
} }

View File

@ -3083,7 +3083,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
} }
head->func = func; head->func = func;
head->next = NULL; head->next = NULL;
kasan_record_aux_stack_noalloc(head); kasan_record_aux_stack(head);
local_irq_save(flags); local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data); rdp = this_cpu_ptr(&rcu_data);
lazy = lazy_in && !rcu_async_should_hurry(); lazy = lazy_in && !rcu_async_should_hurry();
@ -3817,7 +3817,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
return; return;
} }
kasan_record_aux_stack_noalloc(ptr); kasan_record_aux_stack(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) { if (!success) {
run_page_cache_worker(krcp); run_page_cache_worker(krcp);

View File

@ -10590,7 +10590,7 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
return; return;
/* No page allocation under rq lock */ /* No page allocation under rq lock */
task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC); task_work_add(curr, work, TWA_RESUME);
} }
void sched_mm_cid_exit_signals(struct task_struct *t) void sched_mm_cid_exit_signals(struct task_struct *t)

View File

@ -55,25 +55,13 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
enum task_work_notify_mode notify) enum task_work_notify_mode notify)
{ {
struct callback_head *head; struct callback_head *head;
int flags = notify & TWA_FLAGS;
notify &= ~TWA_FLAGS;
if (notify == TWA_NMI_CURRENT) { if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current)) if (WARN_ON_ONCE(task != current))
return -EINVAL; return -EINVAL;
if (!IS_ENABLED(CONFIG_IRQ_WORK)) if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL; return -EINVAL;
} else { } else {
/*
* Record the work call stack in order to print it in KASAN
* reports.
*
* Note that stack allocation can fail if TWAF_NO_ALLOC flag
* is set and new page is needed to expand the stack buffer.
*/
if (flags & TWAF_NO_ALLOC)
kasan_record_aux_stack_noalloc(work);
else
kasan_record_aux_stack(work); kasan_record_aux_stack(work);
} }

View File

@ -2180,7 +2180,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
debug_work_activate(work); debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */ /* record the work call stack in order to print it in KASAN reports */
kasan_record_aux_stack_noalloc(work); kasan_record_aux_stack(work);
/* we own @work, set data and link */ /* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags); set_work_pwq(work, pwq, extra_flags);

View File

@ -524,7 +524,11 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0); sizeof(struct kasan_free_meta) : 0);
} }
static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) /*
* This function avoids dynamic memory allocations and thus can be called from
* contexts that do not allow allocating memory.
*/
void kasan_record_aux_stack(void *addr)
{ {
struct slab *slab = kasan_addr_to_slab(addr); struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache; struct kmem_cache *cache;
@ -541,17 +545,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
return; return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}
void kasan_record_aux_stack(void *addr)
{
return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}
void kasan_record_aux_stack_noalloc(void *addr)
{
return __kasan_record_aux_stack(addr, 0);
} }
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)

View File

@ -2311,7 +2311,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
* We have to do this manually because the rcu_head is * We have to do this manually because the rcu_head is
* not located inside the object. * not located inside the object.
*/ */
kasan_record_aux_stack_noalloc(x); kasan_record_aux_stack(x);
delayed_free->object = x; delayed_free->object = x;
call_rcu(&delayed_free->head, slab_free_after_rcu_debug); call_rcu(&delayed_free->head, slab_free_after_rcu_debug);