Merge tag 'x86-urgent-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:

 - Fix PAT on Xen, which caused i915 driver failures

 - Fix compat INT 80 entry crash on Xen PV guests

 - Fix 'MMIO Stale Data' mitigation status reporting on older Intel CPUs

 - Fix RSB stuffing regressions

 - Fix ORC unwinding on ftrace trampolines

 - Add Intel Raptor Lake CPU model number

 - Fix (work around) a SEV-SNP bootloader bug providing bogus values in
   boot_params->cc_blob_address, by ignoring the value on !SEV-SNP bootups

 - Fix SEV-SNP early boot failure

 - Fix the objtool list of noreturn functions and annotate snp_abort(),
   which bug confused objtool on gcc-12

 - Fix the documentation for retbleed

* tag 'x86-urgent-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Documentation/ABI: Mention retbleed vulnerability info file for sysfs
  x86/sev: Mark snp_abort() noreturn
  x86/sev: Don't use cc_platform_has() for early SEV-SNP calls
  x86/boot: Don't propagate uninitialized boot_params->cc_blob_address
  x86/cpu: Add new Raptor Lake CPU model number
  x86/unwind/orc: Unwind ftrace trampolines with correct ORC entry
  x86/nospec: Fix i386 RSB stuffing
  x86/nospec: Unwreck the RSB stuffing
  x86/bugs: Add "unknown" reporting for MMIO Stale Data
  x86/entry: Fix entry_INT80_compat for Xen PV guests
  x86/PAT: Have pat_enabled() properly reflect state when running on Xen
commit 2f23a7c914
@@ -523,6 +523,7 @@ What:	/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
 		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+		/sys/devices/system/cpu/vulnerabilities/retbleed
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities
@@ -230,6 +230,20 @@ The possible values in this file are:
      * - 'Mitigation: Clear CPU buffers'
        - The processor is vulnerable and the CPU buffer clearing mitigation is
          enabled.
+     * - 'Unknown: No mitigations'
+       - The processor vulnerability status is unknown because it is
+         out of Servicing period. Mitigation is not attempted.
+
+Definitions:
+------------
+
+Servicing period: The process of providing functional and security updates to
+Intel processors or platforms, utilizing the Intel Platform Update (IPU)
+process or other similar mechanisms.
+
+End of Servicing Updates (ESU): ESU is the date at which Intel will no
+longer provide Servicing, such as through IPU or other similar update
+processes. ESU dates will typically be aligned to end of quarter.
 
 If the processor is vulnerable then the following information is appended to
 the above information:
@@ -132,7 +132,17 @@ void snp_set_page_private(unsigned long paddr);
 void snp_set_page_shared(unsigned long paddr);
 void sev_prep_identity_maps(unsigned long top_level_pgt);
 #else
-static inline void sev_enable(struct boot_params *bp) { }
+static inline void sev_enable(struct boot_params *bp)
+{
+	/*
+	 * bp->cc_blob_address should only be set by boot/compressed kernel.
+	 * Initialize it to 0 unconditionally (thus here in this stub too) to
+	 * ensure that uninitialized values from buggy bootloaders aren't
+	 * propagated.
+	 */
+	if (bp)
+		bp->cc_blob_address = 0;
+}
 static inline void sev_es_shutdown_ghcb(void) { }
 static inline bool sev_es_check_ghcb_fault(unsigned long address)
 {
@@ -276,6 +276,14 @@ void sev_enable(struct boot_params *bp)
 	struct msr m;
 	bool snp;
 
+	/*
+	 * bp->cc_blob_address should only be set by boot/compressed kernel.
+	 * Initialize it to 0 to ensure that uninitialized values from
+	 * buggy bootloaders aren't propagated.
+	 */
+	if (bp)
+		bp->cc_blob_address = 0;
+
 	/*
 	 * Setup/preliminary detection of SNP. This will be sanity-checked
 	 * against CPUID/MSR values later.
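For illustration, here is a standalone sketch (toy struct and made-up values, not kernel code) of the defensive pattern both sev_enable() hunks apply: zero the field before anything can consume it, so a bootloader that never initialized it cannot leak garbage into later SEV-SNP setup.

```c
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the real boot_params; only the field of interest. */
struct boot_params_demo {
	unsigned long cc_blob_address;
};

/* Mirrors the fix: sanitize up front, set only when a blob really exists. */
static void sev_enable_demo(struct boot_params_demo *bp, int snp_blob_found)
{
	if (bp)
		bp->cc_blob_address = 0;
	if (snp_blob_found)
		bp->cc_blob_address = 0x1000;	/* pretend a real blob address */
}

int main(void)
{
	struct boot_params_demo bp;

	memset(&bp, 0xA5, sizeof(bp));	/* simulate a buggy bootloader's garbage */
	sev_enable_demo(&bp, 0);	/* a !SEV-SNP boot */
	printf("cc_blob_address = %#lx\n", bp.cc_blob_address);	/* 0, not garbage */
	return 0;
}
```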
@@ -311,7 +311,7 @@ SYM_CODE_START(entry_INT80_compat)
 	 * Interrupts are off on entry.
 	 */
 	ASM_CLAC			/* Do this early to minimize exposure */
-	SWAPGS
+	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
 
 	/*
 	 * User tracing code (ptrace or signal handlers) might assume that
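The ALTERNATIVE line means: assemble `swapgs` as the default, and patch it away to nothing at boot when X86_FEATURE_XENPV is set, since on Xen PV the hypervisor manages the GS base. A rough userspace analogue of the selection logic (a plain runtime check, not the kernel's boot-time instruction patching):

```c
#include <stdbool.h>
#include <stdio.h>

static bool running_on_xen_pv;	/* stands in for X86_FEATURE_XENPV */

static void int80_entry_prologue(void)
{
	if (!running_on_xen_pv)
		puts("swapgs");		/* bare metal/HVM: swap to kernel GS */
	else
		puts("(nothing)");	/* Xen PV: hypervisor handles GS */
}

int main(void)
{
	running_on_xen_pv = true;
	int80_entry_prologue();
	return 0;
}
```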
@@ -457,7 +457,8 @@
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA		X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED		X86_BUG(26) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB		X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN		X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED		X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB		X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -27,6 +27,7 @@
  *	_X	- regular server parts
  *	_D	- micro server parts
  *	_N,_P	- other mobile parts
+ *	_S	- other client parts
  *
  * Historical OPTDIFFs:
  *
@@ -112,6 +113,7 @@
 
 #define INTEL_FAM6_RAPTORLAKE		0xB7
 #define INTEL_FAM6_RAPTORLAKE_P		0xBA
+#define INTEL_FAM6_RAPTORLAKE_S		0xBF
 
 /* "Small Core" Processors (Atom) */
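The new model number feeds tables keyed on family 6 plus model. A small userspace sketch (uses the compiler-provided cpuid.h; not kernel code) that decodes family/model the standard way, so a Raptor Lake S part would report family 6, model 0xBF:

```c
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	unsigned int base_family = (eax >> 8) & 0xf;
	unsigned int family = base_family;
	unsigned int model = (eax >> 4) & 0xf;

	if (base_family == 0xf)			/* extended family */
		family += (eax >> 20) & 0xff;
	if (base_family == 0x6 || base_family == 0xf)	/* extended model */
		model |= ((eax >> 16) & 0xf) << 4;

	/* On INTEL_FAM6_RAPTORLAKE_S this prints "family 6 model 0xbf". */
	printf("family %u model %#x\n", family, model);
	return 0;
}
```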
@@ -35,33 +35,56 @@
 #define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
 
+/*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT			\
+	ANNOTATE_INTRA_FUNCTION_CALL;		\
+	call	772f;				\
+	int3;					\
+772:
+
 /*
+ * Stuff the entire RSB.
+ *
  * Google experimented with loop-unrolling and this turned out to be
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
-	mov	$(nr/2), reg;			\
-771:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	772f;				\
-773:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	773b;				\
-772:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	774f;				\
-775:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	775b;				\
-774:						\
-	add	$(BITS_PER_LONG/8) * 2, sp;	\
-	dec	reg;				\
-	jnz	771b;				\
-	/* barrier for jnz misprediction */	\
+#ifdef CONFIG_X86_64
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	mov	$(nr/2), reg;				\
+771:							\
+	__FILL_RETURN_SLOT				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
+	dec	reg;					\
+	jnz	771b;					\
+	/* barrier for jnz misprediction */		\
 	lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	.rept nr;					\
+	__FILL_RETURN_SLOT;				\
+	.endr;						\
+	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
+#endif
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
+	lfence;
 
 #ifdef __ASSEMBLY__
@@ -132,28 +155,15 @@
 #endif
 .endm
 
-.macro ISSUE_UNBALANCED_RET_GUARD
-	ANNOTATE_INTRA_FUNCTION_CALL
-	call .Lunbalanced_ret_guard_\@
-	int3
-.Lunbalanced_ret_guard_\@:
-	add $(BITS_PER_LONG/8), %_ASM_SP
-	lfence
-.endm
-
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
-.ifb \ftr2
-	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
-.else
-	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
-.endif
-	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
-.Lunbalanced_\@:
-	ISSUE_UNBALANCED_RET_GUARD
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
+		__stringify(__FILL_ONE_RETURN), \ftr2
 
 .Lskip_rsb_\@:
 .endm
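A quick arithmetic check of the x86_64 variant's bookkeeping (plain C, assuming nr == RSB_CLEAR_LOOPS == 32 and 8-byte longs): each iteration issues two calls, so the loop runs nr/2 times and rewinds RSP by 16 bytes per iteration, leaving the stack pointer where it started while 32 RSB entries are written.

```c
#include <stdio.h>

int main(void)
{
	const int nr = 32;		/* RSB_CLEAR_LOOPS */
	const int slots_per_iter = 2;	/* two __FILL_RETURN_SLOTs per pass */
	const int bytes_per_slot = 8;	/* BITS_PER_LONG/8 on x86_64 */
	int iters = nr / slots_per_iter;

	printf("iterations:          %d\n", iters);			/* 16 */
	printf("RSB entries written: %d\n", iters * slots_per_iter);	/* 32 */
	printf("RSP rewound/iter:    %d bytes\n",
	       slots_per_iter * bytes_per_slot);			/* 16 */
	return 0;
}
```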
@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 void snp_set_wakeup_secondary_cpu(void);
 bool snp_init(struct boot_params *bp);
-void snp_abort(void);
+void __init __noreturn snp_abort(void);
 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
@@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
 	u64 ia32_cap;
 
 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
-	    cpu_mitigations_off()) {
+	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+	     cpu_mitigations_off()) {
 		mmio_mitigation = MMIO_MITIGATION_OFF;
 		return;
 	}
@@ -538,6 +539,8 @@ static void __init md_clear_update_mitigation(void)
 		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
 	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
 }
 
 static void __init md_clear_select_mitigation(void)
@@ -2275,6 +2278,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static ssize_t mmio_stale_data_show_state(char *buf)
 {
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return sysfs_emit(buf, "Unknown: No mitigations\n");
+
 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
 		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
 
@@ -2421,6 +2427,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		return srbds_show_state(buf);
 
 	case X86_BUG_MMIO_STALE_DATA:
+	case X86_BUG_MMIO_UNKNOWN:
 		return mmio_stale_data_show_state(buf);
 
 	case X86_BUG_RETBLEED:
@@ -2480,7 +2487,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
 
 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+	else
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
 }
 
 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
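The show-state changes above surface through sysfs. A minimal userspace reader (standard file I/O only) prints whatever string the kernel chose, e.g. "Unknown: No mitigations" on an out-of-servicing CPU:

```c
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

	if (!f) {
		perror("fopen");	/* file absent on old kernels/arches */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("MMIO Stale Data: %s", line);
	fclose(f);
	return 0;
}
```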
@@ -1135,7 +1135,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_SWAPGS		BIT(6)
 #define NO_ITLB_MULTIHIT	BIT(7)
 #define NO_SPECTRE_V2		BIT(8)
-#define NO_EIBRS_PBRSB		BIT(9)
+#define NO_MMIO			BIT(9)
+#define NO_EIBRS_PBRSB		BIT(10)
 
 #define VULNWL(vendor, family, model, whitelist)	\
 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1158,6 +1159,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
 
 	/* Intel Family 6 */
+	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
+	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
+	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
+	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
+
 	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
 	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
 	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -1176,9 +1182,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
 
 	/*
 	 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1193,18 +1199,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
 
 	/* AMD Family 0xf - 0x12 */
-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
 	/* Zhaoxin Family 7 */
-	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
-	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
+	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
+	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
 	{}
 };
@@ -1358,10 +1364,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Affected CPU list is generally enough to enumerate the vulnerability,
 	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
 	 * not want the guest to enumerate the bug.
+	 *
+	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
-	    !arch_cap_mmio_immune(ia32_cap))
-		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+	if (!arch_cap_mmio_immune(ia32_cap)) {
+		if (cpu_matches(cpu_vuln_blacklist, MMIO))
+			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
+	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
 		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
@@ -701,7 +701,13 @@ static void __init early_set_pages_state(unsigned long paddr, unsigned int npages)
 void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 					 unsigned int npages)
 {
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+	/*
+	 * This can be invoked in early boot while running identity mapped, so
+	 * use an open coded check for SNP instead of using cc_platform_has().
+	 * This eliminates worries about jump tables or checking boot_cpu_data
+	 * in the cc_platform_has() function.
+	 */
+	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	/*
@@ -717,7 +723,13 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 					unsigned int npages)
 {
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+	/*
+	 * This can be invoked in early boot while running identity mapped, so
+	 * use an open coded check for SNP instead of using cc_platform_has().
+	 * This eliminates worries about jump tables or checking boot_cpu_data
+	 * in the cc_platform_has() function.
+	 */
+	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 		return;
 
 	/* Invalidate the memory pages before they are marked shared in the RMP table. */
@@ -2100,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp)
 	return true;
 }
 
-void __init snp_abort(void)
+void __init __noreturn snp_abort(void)
 {
 	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
 }
@@ -93,22 +93,27 @@ static struct orc_entry *orc_find(unsigned long ip);
 static struct orc_entry *orc_ftrace_find(unsigned long ip)
 {
 	struct ftrace_ops *ops;
-	unsigned long caller;
+	unsigned long tramp_addr, offset;
 
 	ops = ftrace_ops_trampoline(ip);
 	if (!ops)
 		return NULL;
 
+	/* Set tramp_addr to the start of the code copied by the trampoline */
 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
-		caller = (unsigned long)ftrace_regs_call;
+		tramp_addr = (unsigned long)ftrace_regs_caller;
 	else
-		caller = (unsigned long)ftrace_call;
+		tramp_addr = (unsigned long)ftrace_caller;
+
+	/* Now place tramp_addr to the location within the trampoline ip is at */
+	offset = ip - ops->trampoline;
+	tramp_addr += offset;
 
 	/* Prevent unlikely recursion */
-	if (ip == caller)
+	if (ip == tramp_addr)
 		return NULL;
 
-	return orc_find(caller);
+	return orc_find(tramp_addr);
 }
 #else
 static struct orc_entry *orc_ftrace_find(unsigned long ip)
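The fix replaces "look up the caller symbol" with "map ip to the same offset inside the static trampoline text that was copied". A toy computation with made-up addresses (not real kernel addresses) showing the mapping:

```c
#include <stdio.h>

int main(void)
{
	unsigned long trampoline    = 0xffffffffa0000000UL; /* ops->trampoline */
	unsigned long static_caller = 0xffffffff81001000UL; /* ftrace_caller copy source */
	unsigned long ip            = trampoline + 0x40;    /* unwind point inside tramp */

	/* Same offset into the static text the trampoline was copied from. */
	unsigned long tramp_addr = static_caller + (ip - trampoline);

	printf("orc_find(%#lx)\n", tramp_addr);	/* 0xffffffff81001040 */
	return 0;
}
```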
@@ -62,6 +62,7 @@
 
 static bool __read_mostly pat_bp_initialized;
 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT);
 static bool __read_mostly pat_bp_enabled;
 static bool __read_mostly pat_cm_initialized;
 
@@ -86,6 +87,7 @@ void pat_disable(const char *msg_reason)
 static int __init nopat(char *str)
 {
 	pat_disable("PAT support disabled via boot option.");
+	pat_force_disabled = true;
 	return 0;
 }
 early_param("nopat", nopat);
@@ -272,7 +274,7 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-void init_cache_modes(void)
+void __init init_cache_modes(void)
 {
 	u64 pat = 0;
 
@@ -313,6 +315,12 @@ void init_cache_modes(void)
 		 */
 		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+	} else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) {
+		/*
+		 * Clearly PAT is enabled underneath. Allow pat_enabled() to
+		 * reflect this.
+		 */
+		pat_bp_enabled = true;
 	}
 
 	__init_cache_modes(pat);
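The PAT() expression above packs eight 3-bit memory types, one per byte, into the 64-bit IA32_PAT MSR image. A self-contained sketch (encodings per the Intel SDM; the PAT macro is re-declared locally for the demo) that reproduces the default value:

```c
#include <stdint.h>
#include <stdio.h>

/* Memory-type encodings used by the IA32_PAT MSR (Intel SDM). */
enum { UC = 0, WC = 1, WT = 4, WP = 5, WB = 6, UC_MINUS = 7 };

#define PAT(i, t)	((uint64_t)(t) << ((i) * 8))	/* one byte per entry */

int main(void)
{
	/* The power-on default programmed in the hunk above. */
	uint64_t pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		       PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);

	printf("IA32_PAT = %#llx\n", (unsigned long long)pat); /* 0x7040600070406 */
	return 0;
}
```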
@@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 
 	/*
 	 * Unfortunately these have to be hard coded because the noreturn
-	 * attribute isn't provided in ELF data.
+	 * attribute isn't provided in ELF data. Keep 'em sorted.
 	 */
 	static const char * const global_noreturns[] = {
-		"__stack_chk_fail",
-		"panic",
-		"do_exit",
-		"do_task_dead",
-		"kthread_exit",
-		"make_task_dead",
-		"__module_put_and_kthread_exit",
-		"kthread_complete_and_exit",
-		"__reiserfs_panic",
-		"lbug_with_loc",
-		"fortify_panic",
-		"usercopy_abort",
-		"machine_real_restart",
-		"rewind_stack_and_make_dead",
-		"kunit_try_catch_throw",
-		"xen_start_kernel",
-		"cpu_bringup_and_idle",
-		"do_group_exit",
-		"stop_this_cpu",
-		"__invalid_creds",
-		"cpu_startup_entry",
+		"__invalid_creds",
+		"__module_put_and_kthread_exit",
+		"__reiserfs_panic",
+		"__stack_chk_fail",
+		"__ubsan_handle_builtin_unreachable",
+		"cpu_bringup_and_idle",
+		"cpu_startup_entry",
+		"do_exit",
+		"do_group_exit",
+		"do_task_dead",
+		"ex_handler_msr_mce",
+		"fortify_panic",
+		"kthread_complete_and_exit",
+		"kthread_exit",
+		"kunit_try_catch_throw",
+		"lbug_with_loc",
+		"machine_real_restart",
+		"make_task_dead",
+		"panic",
+		"rewind_stack_and_make_dead",
+		"sev_es_terminate",
+		"snp_abort",
+		"stop_this_cpu",
+		"usercopy_abort",
+		"xen_start_kernel",
 	};
 
 	if (!func)
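Why this list exists at all: the compiler knows a function is noreturn, but that fact is not recorded in the ELF objtool analyzes, so objtool carries its own table. A minimal C illustration (toy function, not kernel code):

```c
#include <stdlib.h>

/* Visible to the compiler, but not preserved in the emitted ELF. */
__attribute__((noreturn)) static void demo_abort(void)
{
	exit(1);	/* never returns, like snp_abort() */
}

int main(void)
{
	demo_abort();
	/*
	 * Unreachable. A binary analyzer that doesn't know demo_abort()
	 * is noreturn would expect control flow to continue here.
	 */
}
```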