mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
4e90d0522a
Currently, each architecture can support PREEMPT_DYNAMIC through
either static calls or static keys. To support PREEMPT_DYNAMIC on
riscv, we face three choices:
1. only add static calls support to riscv
As Mark pointed out in commit 99cf983cc8
("sched/preempt: Add
PREEMPT_DYNAMIC using static keys"), static keys "...should have
slightly lower overhead than non-inline static calls, as this
effectively inlines each trampoline into the start of its callee. This
may avoid redundant work, and may integrate better with CFI schemes."
So even if we add static calls (without inline static calls) to riscv,
static keys would still be the better choice.
2. add static calls and inline static calls to riscv
Per my understanding, inline static calls require objtool support,
which is not easy.
3. use static keys
While riscv doesn't have static calls support, it supports static keys
perfectly. So this patch selects HAVE_PREEMPT_DYNAMIC_KEY to enable
support for PREEMPT_DYNAMIC on riscv, so that the preemption model can
be chosen at boot time. It also patches asm-generic/preempt.h, mainly
to add __preempt_schedule() and __preempt_schedule_notrace() macros
for the PREEMPT_DYNAMIC case. Other architectures which use generic
preempt.h can also benefit from this patch by simply selecting
HAVE_PREEMPT_DYNAMIC_KEY to enable PREEMPT_DYNAMIC if they support
static keys (a sketch of the static-key approach follows the trailers
below).
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20230716164925.1858-1-jszhang@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
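For context, here is a minimal sketch of what the static-key flavour of
PREEMPT_DYNAMIC looks like on the scheduler side, loosely following commit
99cf983cc8 ("sched/preempt: Add PREEMPT_DYNAMIC using static keys"). The key
and function names match the generic code in kernel/sched/core.c, but the
bodies are a simplified illustration (the helper
sketch_set_preempt_model_full() in particular is hypothetical), not the
verbatim kernel implementation:

#include <linux/jump_label.h>
#include <linux/preempt.h>
#include <linux/sched.h>

/*
 * Simplified illustration of the CONFIG_HAVE_PREEMPT_DYNAMIC_KEY variant:
 * each "dynamic" entry point is guarded by a static key, and disabling the
 * key (preempt=none/voluntary) patches the branch so the call returns
 * immediately.
 */
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

void __sched notrace dynamic_preempt_schedule(void)
{
	/* Key disabled => full preemption is off, nothing to do. */
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
		return;
	preempt_schedule();
}

/* Hypothetical helper: switching the preemption model boils down to this. */
static void sketch_set_preempt_model_full(bool full)
{
	if (full)
		static_key_enable(&sk_dynamic_preempt_schedule.key);
	else
		static_key_disable(&sk_dynamic_preempt_schedule.key);
}

The static key is flipped once, at boot or when the preemption model is
switched (e.g. via the preempt= command line), so the hot path costs a
patched branch rather than an indirect call through a static-call trampoline.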
101 lines
2.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
	 * lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_notrace(void);

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

void dynamic_preempt_schedule(void);
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule()		dynamic_preempt_schedule()
#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()

#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */

#define __preempt_schedule()		preempt_schedule()
#define __preempt_schedule_notrace()	preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
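For reference on how the __preempt_schedule() macro above is consumed: under
CONFIG_PREEMPTION the generic preempt_enable() in include/linux/preempt.h
expands roughly as shown below, so this header's static-key indirection is
what decides whether dropping the last preempt count ends up rescheduling:

/* Roughly the CONFIG_PREEMPTION definition from include/linux/preempt.h. */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)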