Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge tag 'x86_cpu_for_v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu feature updates from Borislav Petkov:

 - Add synthetic X86_FEATURE flags for the different AMD Zen generations
   and use them everywhere instead of ad-hoc family/model checks. Drop an
   ancient AMD errata checking facility as a result

 - Fix a fragile initcall ordering in intel_epb

 - Do not issue the MFENCE+LFENCE barrier for the TSC deadline and X2APIC
   MSRs on AMD as it is not needed there

* tag 'x86_cpu_for_v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Add X86_FEATURE_ZEN1
  x86/CPU/AMD: Drop now unused CPU erratum checking function
  x86/CPU/AMD: Get rid of amd_erratum_1485[]
  x86/CPU/AMD: Get rid of amd_erratum_400[]
  x86/CPU/AMD: Get rid of amd_erratum_383[]
  x86/CPU/AMD: Get rid of amd_erratum_1054[]
  x86/CPU/AMD: Move the DIV0 bug detection to the Zen1 init function
  x86/CPU/AMD: Move Zenbleed check to the Zen2 init function
  x86/CPU/AMD: Rename init_amd_zn() to init_amd_zen_common()
  x86/CPU/AMD: Call the spectral chicken in the Zen2 init function
  x86/CPU/AMD: Move erratum 1076 fix into the Zen1 init function
  x86/CPU/AMD: Move the Zen3 BTC_NO detection to the Zen3 init function
  x86/CPU/AMD: Carve out the erratum 1386 fix
  x86/CPU/AMD: Add ZenX generations flags
  x86/cpu/intel_epb: Don't rely on link order
  x86/barrier: Do not serialize MSR accesses on AMD
commit bef91c28f2
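Illustration of the first bullet in the pull message above (a sketch, not code from this merge): an ad-hoc family/model test for a Zen2 part versus the new synthetic flag. The model ranges used here are the ones decoded in bsp_init_amd() further down.

/*
 * Illustrative sketch only (not from this diff): what "use synthetic
 * Zen flags instead of ad-hoc family/model checks" means in practice.
 */

/* Before: every call site open-codes the family/model ranges. */
static bool is_zen2_cpu_old_style(struct cpuinfo_x86 *c)
{
	return c->x86 == 0x17 &&
	       ((c->x86_model >= 0x30 && c->x86_model <= 0x4f) ||
		(c->x86_model >= 0x60 && c->x86_model <= 0x7f) ||
		(c->x86_model >= 0x90 && c->x86_model <= 0x91) ||
		(c->x86_model >= 0xa0 && c->x86_model <= 0xaf));
}

/* After: the ranges are decoded once in bsp_init_amd(), callers test a flag. */
static bool is_zen2_cpu_new_style(void)
{
	return boot_cpu_has(X86_FEATURE_ZEN2);
}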
@@ -81,22 +81,4 @@ do { \

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}

#endif /* _ASM_X86_BARRIER_H */
@@ -218,7 +218,7 @@

#define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN			(7*32+28) /* "" CPU based on Zen microarchitecture */
#define X86_FEATURE_ZEN			( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED	( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL	( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -308,10 +308,14 @@

#define X86_FEATURE_SMBA		(11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC		(11*32+22) /* "" Bandwidth Monitoring Event Configuration */
#define X86_FEATURE_USER_SHSTK		(11*32+23) /* Shadow stack support for user mode applications */

#define X86_FEATURE_SRSO		(11*32+24) /* "" AMD BTB untrain RETs */
#define X86_FEATURE_SRSO_ALIAS		(11*32+25) /* "" AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT	(11*32+26) /* "" Issue an IBPB only on VMEXIT */
#define X86_FEATURE_APIC_MSRS_FENCE	(11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
#define X86_FEATURE_ZEN2		(11*32+28) /* "" CPU based on Zen2 microarchitecture */
#define X86_FEATURE_ZEN3		(11*32+29) /* "" CPU based on Zen3 microarchitecture */
#define X86_FEATURE_ZEN4		(11*32+30) /* "" CPU based on Zen4 microarchitecture */
#define X86_FEATURE_ZEN1		(11*32+31) /* "" CPU based on Zen1 microarchitecture */

/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
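Each X86_FEATURE_* value above encodes word*32+bit, so (11*32+28) is bit 28 of capability word 11. The following is a self-contained userspace model of that encoding and of a cpu_has()-style lookup; the fake_* names and the NCAPINTS value are stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define NCAPINTS 21				/* stand-in: number of 32-bit capability words */
#define FEATURE_ZEN2 (11*32 + 28)		/* same encoding as X86_FEATURE_ZEN2 */

/* Hypothetical stand-in for the per-CPU capability bitmap. */
struct fake_cpuinfo {
	uint32_t caps[NCAPINTS];
};

static int fake_cpu_has(const struct fake_cpuinfo *c, int feature)
{
	return (c->caps[feature / 32] >> (feature % 32)) & 1;
}

static void fake_set_cap(struct fake_cpuinfo *c, int feature)
{
	c->caps[feature / 32] |= 1u << (feature % 32);
}

int main(void)
{
	struct fake_cpuinfo c = { { 0 } };

	fake_set_cap(&c, FEATURE_ZEN2);
	printf("word %d, bit %d, set: %d\n",
	       FEATURE_ZEN2 / 32, FEATURE_ZEN2 % 32, fake_cpu_has(&c, FEATURE_ZEN2));
	return 0;
}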
@@ -749,4 +749,22 @@ enum mds_mitigations {

extern bool gds_ucode_mitigated(void);

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE));
}

#endif /* _ASM_X86_PROCESSOR_H */
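With the alternative() above, the MFENCE;LFENCE pair is patched out at boot when X86_FEATURE_APIC_MSRS_FENCE is clear (AMD and Hygon clear it in the hunks further down). A sketch of the kind of call site this protects, loosely modeled on TSC-deadline timer programming and simplified here:

/*
 * Sketch of a typical user of weak_wrmsr_fence() (modeled on the
 * TSC-deadline timer programming path; simplified, not verbatim).
 */
static int example_program_tsc_deadline(u64 delta)
{
	u64 tsc;

	/* IA32_TSC_DEADLINE is not self-serializing: fence on CPUs that need it. */
	weak_wrmsr_fence();

	tsc = rdtsc();
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + delta);
	return 0;
}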
@@ -34,87 +34,6 @@
 */
static u32 nodes_per_socket = 1;

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static const int amd_zenbleed[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));

static const int amd_div0[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));

static const int amd_erratum_1485[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
			   AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
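The range encoding the dropped facility relied on is easy to check in isolation: family in bits 31-24, then a 12-bit (model,stepping) start and end. Below is a self-contained userspace model using the same arithmetic as the macros above; the helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Same packing as AMD_MODEL_RANGE()/AMD_MODEL_RANGE_*() above. */
#define MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	(((f) << 24) | ((m_start) << 16) | ((s_start) << 12) | ((m_end) << 4) | (s_end))
#define RANGE_FAMILY(r)	(((r) >> 24) & 0xff)
#define RANGE_START(r)	(((r) >> 12) & 0xfff)
#define RANGE_END(r)	((r) & 0xfff)

/* Does (family, model, stepping) fall inside one packed range? */
static int in_range(uint32_t r, uint8_t family, uint8_t model, uint8_t stepping)
{
	uint32_t ms = (model << 4) | stepping;	/* same "ms" as cpu_has_amd_erratum() */

	return family == RANGE_FAMILY(r) && ms >= RANGE_START(r) && ms <= RANGE_END(r);
}

int main(void)
{
	/* First range of amd_zenbleed[]: family 0x17, models 0x30..0x4f, any stepping. */
	uint32_t r = MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);

	printf("0x17/0x31/0x0 -> %d\n", in_range(r, 0x17, 0x31, 0x0));	/* 1 */
	printf("0x17/0x71/0x0 -> %d\n", in_range(r, 0x17, 0x71, 0x0));	/* 0 */
	return 0;
}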
@@ -616,6 +535,49 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17: {
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;
	}
	case 0x19: {
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;
	}
	default:
		break;
	}

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
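For a quick sanity check of those ranges outside the kernel, here is a userspace re-statement of the same table. The sample model numbers in main() are illustrative, publicly documented CPUID values and are not taken from this diff.

#include <stdio.h>

/* Userspace re-statement of the bsp_init_amd() ranges above, for sanity checking. */
static const char *zen_gen(unsigned int family, unsigned int model)
{
	if (family == 0x17) {
		if (model <= 0x2f || (model >= 0x50 && model <= 0x5f))
			return "Zen1";
		if ((model >= 0x30 && model <= 0x4f) || (model >= 0x60 && model <= 0x7f) ||
		    (model >= 0x90 && model <= 0x91) || (model >= 0xa0 && model <= 0xaf))
			return "Zen2";
	} else if (family == 0x19) {
		if (model <= 0x0f || (model >= 0x20 && model <= 0x5f))
			return "Zen3";
		if ((model >= 0x10 && model <= 0x1f) || (model >= 0x60 && model <= 0xaf))
			return "Zen4";
	}
	return "unknown";
}

int main(void)
{
	/* Illustrative CPUID family/model pairs (not from the diff). */
	printf("0x17/0x01: %s\n", zen_gen(0x17, 0x01));	/* Zen1 */
	printf("0x17/0x71: %s\n", zen_gen(0x17, 0x71));	/* Zen2 */
	printf("0x19/0x21: %s\n", zen_gen(0x19, 0x21));	/* Zen3 */
	printf("0x19/0x61: %s\n", zen_gen(0x19, 0x61));	/* Zen4 */
	return 0;
}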
@@ -739,15 +701,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
@@ -814,6 +767,16 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
		msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
@@ -847,8 +810,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
@@ -941,26 +913,8 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
	clear_rdrand_cpuid_bit(c);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 *
	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
|
||||
clear_cpu_cap(c, X86_FEATURE_XSAVES);
|
||||
}
|
||||
|
||||
static void init_amd_zn(struct cpuinfo_x86 *c)
|
||||
void init_spectral_chicken(struct cpuinfo_x86 *c)
|
||||
{
|
||||
set_cpu_cap(c, X86_FEATURE_ZEN);
|
||||
#ifdef CONFIG_CPU_UNRET_ENTRY
|
||||
u64 value;
|
||||
|
||||
/*
|
||||
* On Zen2 we offer this chicken (bit) on the altar of Speculation.
|
||||
*
|
||||
* This suppresses speculation from the middle of a basic block, i.e. it
|
||||
* suppresses non-branch predictions.
|
||||
*/
|
||||
if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
|
||||
if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
|
||||
value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
|
||||
wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static void init_amd_zen_common(void)
|
||||
{
|
||||
setup_force_cpu_cap(X86_FEATURE_ZEN);
|
||||
#ifdef CONFIG_NUMA
|
||||
node_reclaim_distance = 32;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void init_amd_zen1(struct cpuinfo_x86 *c)
|
||||
{
|
||||
init_amd_zen_common();
|
||||
fix_erratum_1386(c);
|
||||
|
||||
/* Fix up CPUID bits, but only if not virtualised. */
|
||||
if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
|
||||
@ -986,15 +965,10 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
|
||||
/* Erratum 1076: CPB feature bit not being set in CPUID. */
|
||||
if (!cpu_has(c, X86_FEATURE_CPB))
|
||||
set_cpu_cap(c, X86_FEATURE_CPB);
|
||||
|
||||
/*
|
||||
* Zen3 (Fam19 model < 0x10) parts are not susceptible to
|
||||
* Branch Type Confusion, but predate the allocation of the
|
||||
* BTC_NO bit.
|
||||
*/
|
||||
if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
|
||||
set_cpu_cap(c, X86_FEATURE_BTC_NO);
|
||||
}
|
||||
|
||||
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
|
||||
setup_force_cpu_bug(X86_BUG_DIV0);
|
||||
}
|
||||
|
||||
static bool cpu_has_zenbleed_microcode(void)
|
||||
@@ -1018,11 +992,8 @@ static bool cpu_has_zenbleed_microcode(void)
	return true;
}

static void zenbleed_check(struct cpuinfo_x86 *c)
static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (!cpu_has_amd_erratum(c, amd_zenbleed))
		return;

	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;
@@ -1037,6 +1008,37 @@ static void zenbleed_check(struct cpuinfo_x86 *c)
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	init_amd_zen_common();

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;
@@ -1072,11 +1074,17 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_spectral_chicken(c);
		   fallthrough;
	case 0x19: init_amd_zn(c); break;
	}

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without a XSaveErPtr feature
@@ -1136,7 +1144,7 @@
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);
@@ -1152,16 +1160,8 @@
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	zenbleed_check(c);

	if (cpu_has_amd_erratum(c, amd_div0)) {
		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
		setup_force_cpu_bug(X86_BUG_DIV0);
	}

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
	    cpu_has_amd_erratum(c, amd_erratum_1485))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

#ifdef CONFIG_X86_32
@@ -1315,7 +1315,7 @@ static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zenbleed_check(c);
	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
@@ -1856,6 +1856,13 @@ static void identify_cpu(struct cpuinfo_x86 *c)
	c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
#endif

	/*
	 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
	 * Hygon will clear it in ->c_init() below.
	 */
	set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
@@ -354,6 +354,9 @@ static void init_hygon(struct cpuinfo_x86 *c)
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);

	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
@@ -237,4 +237,4 @@ err_out_online:
	cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
	return ret;
}
subsys_initcall(intel_epb_init);
late_initcall(intel_epb_init);
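Initcalls run level by level (core, postcore, arch, subsys, fs, device, late); within a level, ordering falls out of link order. Moving intel_epb_init() to the late level therefore guarantees it runs after every subsys-level initcall it depends on, instead of relying on where intel_epb.o happens to be linked. A minimal sketch with a hypothetical example_init():

/* Sketch: hypothetical driver init, not from this diff. */
static int __init example_init(void)
{
	/* Runs once at boot, after all subsys/fs/device-level initcalls. */
	return 0;
}
late_initcall(example_init);

/*
 * The old placement was:
 *
 *	subsys_initcall(example_init);
 *
 * which only orders the call relative to other levels, not relative to
 * other subsys_initcall() users - those run in link order.
 */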
@@ -218,7 +218,7 @@

#define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN			(7*32+28) /* "" CPU based on Zen microarchitecture */
#define X86_FEATURE_ZEN			( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED	( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL	( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */