x86/bugs: Rename CONFIG_RETPOLINE => CONFIG_MITIGATION_RETPOLINE
Step 5/10 of the namespace unification of CPU mitigations related Kconfig options.

[ mingo: Converted a few more uses in comments/messages as well. ]

Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ariel Miculas <amiculas@cisco.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20231121160740.1249350-6-leitao@debian.org
parent ea4654e088
commit aefb2f2e61
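Note on the idioms being renamed: a Kconfig symbol surfaces in C as a CONFIG_ preprocessor macro, so both '#ifdef CONFIG_...' guards and IS_ENABLED(CONFIG_...) tests throughout the tree have to follow the rename. A minimal, standalone sketch of the two forms under the new name (illustration only, not kernel code; IS_ENABLED() is approximated here, and the macro would normally come from the generated autoconf header, so define it by hand, e.g. cc -DCONFIG_MITIGATION_RETPOLINE=1 demo.c):

  #include <stdio.h>

  /* Simplified stand-in for the kernel's IS_ENABLED() from <linux/kconfig.h>. */
  #ifdef CONFIG_MITIGATION_RETPOLINE
  # define IS_ENABLED(option) 1
  #else
  # define IS_ENABLED(option) 0
  #endif

  int main(void)
  {
          /* Build-time guard: the block is compiled out when the option is off. */
  #ifdef CONFIG_MITIGATION_RETPOLINE
          puts("preprocessor guard: retpolines compiled in");
  #endif

          /* IS_ENABLED()-style test: always compiles, folds to a constant. */
          if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
                  puts("IS_ENABLED() test: retpolines compiled in");
          else
                  puts("IS_ENABLED() test: retpolines not compiled in");
          return 0;
  }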
@@ -473,8 +473,8 @@ Spectre variant 2
 -mindirect-branch=thunk-extern -mindirect-branch-register options.
 If the kernel is compiled with a Clang compiler, the compiler needs
 to support -mretpoline-external-thunk option. The kernel config
-CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with
-the latest updated microcode.
+CONFIG_MITIGATION_RETPOLINE needs to be turned on, and the CPU needs
+to run with the latest updated microcode.
 
 On Intel Skylake-era systems the mitigation covers most, but not all,
 cases. See :ref:`[3] <spec_ref3>` for more details.
@@ -609,8 +609,8 @@ kernel command line.
 Selecting 'on' will, and 'auto' may, choose a
 mitigation method at run time according to the
 CPU, the available microcode, the setting of the
-CONFIG_RETPOLINE configuration option, and the
-compiler with which the kernel was built.
+CONFIG_MITIGATION_RETPOLINE configuration option,
+and the compiler with which the kernel was built.
 
 Selecting 'on' will also enable the mitigation
 against user space to user space task attacks.
@@ -6007,8 +6007,8 @@
 Selecting 'on' will, and 'auto' may, choose a
 mitigation method at run time according to the
 CPU, the available microcode, the setting of the
-CONFIG_RETPOLINE configuration option, and the
-compiler with which the kernel was built.
+CONFIG_MITIGATION_RETPOLINE configuration option,
+and the compiler with which the kernel was built.
 
 Selecting 'on' will also enable the mitigation
 against user space to user space task attacks.
@@ -2457,7 +2457,7 @@ config CALL_PADDING
 
 config FINEIBT
 def_bool y
-depends on X86_KERNEL_IBT && CFI_CLANG && RETPOLINE
+depends on X86_KERNEL_IBT && CFI_CLANG && MITIGATION_RETPOLINE
 select CALL_PADDING
 
 config HAVE_CALL_THUNKS
@@ -2495,7 +2495,7 @@ config MITIGATION_PAGE_TABLE_ISOLATION
 
 See Documentation/arch/x86/pti.rst for more details.
 
-config RETPOLINE
+config MITIGATION_RETPOLINE
 bool "Avoid speculative indirect branches in kernel"
 select OBJTOOL if HAVE_OBJTOOL
 default y
@@ -2507,7 +2507,7 @@ config RETPOLINE
 
 config RETHUNK
 bool "Enable return-thunks"
-depends on RETPOLINE && CC_HAS_RETURN_THUNK
+depends on MITIGATION_RETPOLINE && CC_HAS_RETURN_THUNK
 select OBJTOOL if HAVE_OBJTOOL
 default y if X86_64
 help
@@ -192,7 +192,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 # Additionally, avoid generating expensive indirect jumps which
 # are subject to retpolines for small number of switch cases.
@@ -301,7 +301,7 @@ vdso-install-$(CONFIG_IA32_EMULATION) += arch/x86/entry/vdso/vdso32.so.dbg
 
 archprepare: checkbin
 checkbin:
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 ifeq ($(RETPOLINE_CFLAGS),)
 @echo "You are building kernel with non-retpoline compiler." >&2
 @echo "Please update your compiler." >&2
@@ -87,7 +87,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
 -fno-omit-frame-pointer -foptimize-sibling-calls \
 -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
 
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)
 CFL += $(RETPOLINE_VDSO_CFLAGS)
 endif
@@ -164,7 +164,7 @@ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
 
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)
 KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
 endif
@@ -50,7 +50,7 @@
 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 # define DISABLE_RETPOLINE 0
 #else
 # define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
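The DISABLE_RETPOLINE definition in the hunk above feeds x86's build-time disabled-features mask, which is what lets feature checks collapse to a constant when the option is off. A simplified, self-contained sketch of that idea (hypothetical names; the real kernel uses cpu_feature_enabled() backed by the DISABLED_MASK tables):

  #include <stdbool.h>
  #include <stdio.h>

  #define FEATURE_RETPOLINE_BIT 0   /* hypothetical feature bit, for illustration */

  #ifdef CONFIG_MITIGATION_RETPOLINE
  # define DISABLED_FEATURES 0u                              /* nothing masked off at build time */
  #else
  # define DISABLED_FEATURES (1u << FEATURE_RETPOLINE_BIT)   /* feature disabled in this build */
  #endif

  /* A feature in the build-time disabled mask is false no matter what the CPU
   * reports, so the compiler can drop any code guarded by it. */
  static bool feature_enabled(unsigned int bit, unsigned int cpu_caps)
  {
          if (DISABLED_FEATURES & (1u << bit))
                  return false;
          return (cpu_caps & (1u << bit)) != 0;
  }

  int main(void)
  {
          unsigned int cpu_caps = 1u << FEATURE_RETPOLINE_BIT;   /* pretend the runtime bit is set */

          printf("retpoline treated as enabled: %d\n",
                 feature_enabled(FEATURE_RETPOLINE_BIT, cpu_caps));
          return 0;
  }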
@@ -42,25 +42,25 @@
 
 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define RET jmp __x86_return_thunk
-#else /* CONFIG_RETPOLINE */
+#else /* CONFIG_MITIGATION_RETPOLINE */
 #ifdef CONFIG_SLS
 #define RET ret; int3
 #else
 #define RET ret
 #endif
-#endif /* CONFIG_RETPOLINE */
+#endif /* CONFIG_MITIGATION_RETPOLINE */
 
 #else /* __ASSEMBLY__ */
 
 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
 #define ASM_RET "jmp __x86_return_thunk\n\t"
-#else /* CONFIG_RETPOLINE */
+#else /* CONFIG_MITIGATION_RETPOLINE */
 #ifdef CONFIG_SLS
 #define ASM_RET "ret; int3\n\t"
 #else
 #define ASM_RET "ret\n\t"
 #endif
-#endif /* CONFIG_RETPOLINE */
+#endif /* CONFIG_MITIGATION_RETPOLINE */
 
 #endif /* __ASSEMBLY__ */
 
@@ -241,7 +241,7 @@
 * instruction irrespective of kCFI.
 */
 .macro JMP_NOSPEC reg:req
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 __CS_PREFIX \reg
 jmp __x86_indirect_thunk_\reg
 #else
@@ -251,7 +251,7 @@
 .endm
 
 .macro CALL_NOSPEC reg:req
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 __CS_PREFIX \reg
 call __x86_indirect_thunk_\reg
 #else
@@ -378,7 +378,7 @@ static inline void call_depth_return_thunk(void) {}
 
 #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 
 #define GEN(reg) \
 extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
@@ -399,7 +399,7 @@ static inline void call_depth_return_thunk(void) {}
 
 /*
 * Inline asm uses the %V modifier which is only in newer GCC
-* which is ensured when CONFIG_RETPOLINE is defined.
+* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 */
 # define CALL_NOSPEC \
 ALTERNATIVE_2( \
@@ -544,7 +544,7 @@ static inline bool is_jcc32(struct insn *insn)
 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
 }
 
-#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
+#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL)
 
 /*
 * CALL/JMP *%\reg
@@ -844,12 +844,12 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 #endif /* CONFIG_RETHUNK */
 
-#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */
+#else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */
 
 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
-#endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */
+#endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */
 
 #ifdef CONFIG_X86_KERNEL_IBT
 
@@ -1103,7 +1103,7 @@ static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
 SPECTRE_V2_USER_NONE;
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 static bool spectre_v2_bad_module;
 
 bool retpoline_module_ok(bool has_retpoline)
@@ -1416,7 +1416,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
-!IS_ENABLED(CONFIG_RETPOLINE)) {
+!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
 mitigation_options[i].option);
 return SPECTRE_V2_CMD_AUTO;
@@ -1470,7 +1470,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 
 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 {
-if (!IS_ENABLED(CONFIG_RETPOLINE)) {
+if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
 pr_err("Kernel not compiled with retpoline; no mitigation available!");
 return SPECTRE_V2_NONE;
 }
@@ -307,7 +307,7 @@ union ftrace_op_code_union {
 } __attribute__((packed));
 };
 
-#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
+#define RET_SIZE (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
 
 static unsigned long
 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
@@ -324,7 +324,7 @@ static int can_optimize(unsigned long paddr)
 * However, the kernel built with retpolines or IBT has jump
 * tables disabled so the check can be skipped altogether.
 */
-if (!IS_ENABLED(CONFIG_RETPOLINE) &&
+if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) &&
 !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
 insn_is_indirect_jump(&insn))
 return 0;
@@ -132,7 +132,7 @@ SECTIONS
 LOCK_TEXT
 KPROBES_TEXT
 SOFTIRQENTRY_TEXT
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 *(.text..__x86.indirect_thunk)
 *(.text..__x86.return_thunk)
 #endif
@@ -267,7 +267,7 @@ SECTIONS
 }
 #endif
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 /*
 * List of instructions that call/jmp/jcc to retpoline thunks
 * __x86_indirect_thunk_*(). These instructions can be patched along
@@ -263,7 +263,7 @@ static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
 struct kvm_mmu *mmu)
 {
-if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
+if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
 return kvm_read_cr3(vcpu);
 
 return mmu->get_guest_pgd(vcpu);
@@ -312,7 +312,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 if (!prefetch)
 vcpu->stat.pf_taken++;
 
-if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
+if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
 r = kvm_tdp_page_fault(vcpu, &fault);
 else
 r = vcpu->arch.mmu->page_fault(vcpu, &fault);
@@ -3455,7 +3455,7 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
 if (!svm_check_exit_valid(exit_code))
 return svm_handle_invalid_exit(vcpu, exit_code);
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 if (exit_code == SVM_EXIT_MSR)
 return msr_interception(vcpu);
 else if (exit_code == SVM_EXIT_VINTR)
@@ -207,7 +207,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 7: vmload %_ASM_AX
 8:
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
@@ -344,7 +344,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 /* Pop @svm to RDI, guest registers have been saved already. */
 pop %_ASM_DI
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
@@ -6544,7 +6544,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 
 if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
 goto unexpected_vmexit;
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
 return kvm_emulate_wrmsr(vcpu);
 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
@@ -49,7 +49,7 @@ lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-lib-$(CONFIG_RETPOLINE) += retpoline.o
+lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 obj-y += iomem.o
@@ -469,7 +469,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
 } else {
 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
-if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
+if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
 EMIT1(0xCC); /* int3 */
 }
 
@@ -1273,7 +1273,7 @@ static int emit_jmp_edx(u8 **pprog, u8 *ip)
 u8 *prog = *pprog;
 int cnt = 0;
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5));
 #else
 EMIT2(0xFF, 0xE2);
@@ -61,7 +61,7 @@ ifdef CONFIG_STACKPROTECTOR_STRONG
 PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
 endif
 
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
 endif
 
@@ -35,7 +35,7 @@
 (typeof(ptr)) (__ptr + (off)); \
 })
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 #define __noretpoline __attribute__((__indirect_branch__("keep")))
 #endif
 
@@ -2,7 +2,7 @@
 #ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
 #define _LINUX_INDIRECT_CALL_WRAPPER_H
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 
 /*
 * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
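The INDIRECT_CALL_$NR() wrappers this header provides avoid a retpoline-thunked indirect call when the target is one of a few known builtins. A simplified, standalone sketch of the pattern (hypothetical handler names; the kernel macros additionally use likely() and expand for several candidate functions):

  #include <stdio.h>

  /* Compare the pointer against an expected builtin and call it directly on a
   * match; otherwise fall back to the plain indirect call. */
  #define INDIRECT_CALL_1(f, f1, ...) \
          ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

  static int builtin_handler(int x) { return x * 2; }
  static int other_handler(int x)   { return x + 1; }

  int main(void)
  {
          int (*cb)(int) = builtin_handler;

          /* Direct call: cb matches the expected builtin. */
          printf("%d\n", INDIRECT_CALL_1(cb, builtin_handler, 21));

          cb = other_handler;
          /* No match, so the ordinary indirect call is used. */
          printf("%d\n", INDIRECT_CALL_1(cb, builtin_handler, 41));
          return 0;
  }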
@@ -885,7 +885,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif /* CONFIG_GENERIC_BUG */
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 extern bool retpoline_module_ok(bool has_retpoline);
 #else
 static inline bool retpoline_module_ok(bool has_retpoline)
@@ -93,7 +93,7 @@ extern const struct nft_set_type nft_set_bitmap_type;
 extern const struct nft_set_type nft_set_pipapo_type;
 extern const struct nft_set_type nft_set_pipapo_avx2_type;
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
 const u32 *key, const struct nft_set_ext **ext);
 bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
@@ -4,7 +4,7 @@
 
 #include <net/pkt_cls.h>
 
-#if IS_ENABLED(CONFIG_RETPOLINE)
+#if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
 
 #include <linux/cpufeature.h>
 #include <linux/static_key.h>
@@ -1156,7 +1156,7 @@ static inline u64 rb_time_stamp(struct trace_buffer *buffer)
 u64 ts;
 
 /* Skip retpolines :-( */
-if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
+if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
 ts = trace_clock_local();
 else
 ts = buffer->clock();
@@ -101,7 +101,7 @@ endif
 endif
 
 ifdef CONFIG_NFT_CT
-ifdef CONFIG_RETPOLINE
+ifdef CONFIG_MITIGATION_RETPOLINE
 nf_tables-objs += nft_ct_fast.o
 endif
 endif
@@ -21,7 +21,7 @@
 #include <net/netfilter/nf_log.h>
 #include <net/netfilter/nft_meta.h>
 
-#if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86)
+#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_X86)
 
 static struct static_key_false nf_tables_skip_direct_calls;
 
@@ -207,7 +207,7 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
 struct nft_regs *regs,
 struct nft_pktinfo *pkt)
 {
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 unsigned long e;
 
 if (nf_skip_indirect_calls())
@@ -236,7 +236,7 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
 X(e, nft_objref_map_eval);
 #undef X
 indirect_call:
-#endif /* CONFIG_RETPOLINE */
+#endif /* CONFIG_MITIGATION_RETPOLINE */
 expr->ops->eval(expr, regs, pkt);
 }
 
@@ -751,7 +751,7 @@ static bool nft_ct_set_reduce(struct nft_regs_track *track,
 return false;
 }
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 static const struct nft_expr_ops nft_ct_get_fast_ops = {
 .type = &nft_ct_type,
 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
@@ -796,7 +796,7 @@ nft_ct_select_ops(const struct nft_ctx *ctx,
 return ERR_PTR(-EINVAL);
 
 if (tb[NFTA_CT_DREG]) {
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
 
 switch (k) {
@@ -24,7 +24,7 @@ struct nft_lookup {
 struct nft_set_binding binding;
 };
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
 const u32 *key, const struct nft_set_ext **ext)
 {
@@ -2353,7 +2353,7 @@ static struct pernet_operations psched_net_ops = {
 .exit = psched_net_exit,
 };
 
-#if IS_ENABLED(CONFIG_RETPOLINE)
+#if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)
 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
 #endif
 
@@ -262,7 +262,7 @@ ifdef CONFIG_FTRACE_MCOUNT_USE_OBJTOOL
 objtool-args-$(CONFIG_HAVE_OBJTOOL_NOP_MCOUNT) += --mnop
 endif
 objtool-args-$(CONFIG_UNWINDER_ORC) += --orc
-objtool-args-$(CONFIG_RETPOLINE) += --retpoline
+objtool-args-$(CONFIG_MITIGATION_RETPOLINE) += --retpoline
 objtool-args-$(CONFIG_RETHUNK) += --rethunk
 objtool-args-$(CONFIG_SLS) += --sls
 objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval
@@ -155,7 +155,7 @@ fn main() {
 "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
 );
 let mut features = "-3dnow,-3dnowa,-mmx,+soft-float".to_string();
-if cfg.has("RETPOLINE") {
+if cfg.has("MITIGATION_RETPOLINE") {
 features += ",+retpoline-external-thunk";
 }
 ts.push("features", features);
@@ -1843,7 +1843,7 @@ static void add_header(struct buffer *b, struct module *mod)
 
 buf_printf(b,
 "\n"
-"#ifdef CONFIG_RETPOLINE\n"
+"#ifdef CONFIG_MITIGATION_RETPOLINE\n"
 "MODULE_INFO(retpoline, \"Y\");\n"
 "#endif\n");
 
@@ -50,7 +50,7 @@
 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
 #endif
 
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
 # define DISABLE_RETPOLINE 0
 #else
 # define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
@@ -83,7 +83,7 @@ bool arch_support_alt_relocation(struct special_alt *special_alt,
 * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
 * ensure the same register is used in the mov and jump instructions.
 *
-* NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+* NOTE: MITIGATION_RETPOLINE made it harder still to decode dynamic jumps.
 */
 struct reloc *arch_find_switch_table(struct objtool_file *file,
 struct instruction *insn)
@@ -3984,7 +3984,7 @@ static int validate_retpoline(struct objtool_file *file)
 } else
 continue;
 } else {
-WARN_INSN(insn, "indirect %s found in RETPOLINE build",
+WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
 }
 