Merge branch 'x86/cpu' into tip/master

# New commits in x86/cpu:
    e5d3a57891ba ("x86/cpu: Make all CPUID leaf names consistent")
    588e148d8bab ("x86/fpu: Remove unnecessary CPUID level check")
    754aaac3bbf1 ("x86/fpu: Move CPUID leaf definitions to common code")
    e558eadf6bd6 ("x86/tsc: Remove CPUID "frequency" leaf magic numbers")
    030c15b5610c ("x86/tsc: Move away from TSC leaf magic numbers")
    a86740a77bf0 ("x86/cpu: Move TSC CPUID leaf definition")
    5d82d8e0a9ac ("x86/cpu: Refresh DCA leaf reading code")
    262fba55708b ("x86/cpu: Remove unnecessary MWAIT leaf checks")
    8bd6821c9cf3 ("x86/cpu: Use MWAIT leaf definition")
    497f70284695 ("x86/cpu: Move MWAIT leaf definition to common header")
    5366d8965d35 ("x86/cpu: Remove 'x86_cpu_desc' infrastructure")
    f3f325152673 ("x86/cpu: Move AMD erratum 1386 table over to 'x86_cpu_id'")
    3fa5626720c0 ("x86/cpu: Replace PEBS use of 'x86_cpu_desc' with 'x86_cpu_id'")
    85b08180df07 ("x86/cpu: Expose only stepping min/max interface")
    b8e10c86e674 ("x86/cpu: Introduce new microcode matching helper")
    4bf610499c42 ("x86/cpufeature: Document cpu_feature_enabled() as the default to use")
    29188c160061 ("x86/paravirt: Remove the WBINVD callback")
    7a470e826d75 ("x86/cpufeatures: Free up unused feature bits")

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2024-12-19 20:24:26 +01:00
commit be442533dd
33 changed files with 178 additions and 277 deletions
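For orientation before the per-file diffs: the centerpiece of the branch is retiring the bespoke 'x86_cpu_desc' tables in favor of the generic 'x86_cpu_id' infrastructure. A minimal before/after sketch (the table name and values here are illustrative, not lifted from the tree):

static const struct x86_cpu_desc example_ucodes_old[] = {
	/* before: one entry per individual stepping */
	INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x02000014),
	INTEL_CPU_DESC(INTEL_SKYLAKE_X, 5, 0x02000014),
	{}
};

static const struct x86_cpu_id example_ucodes_new[] = {
	/* after: one entry per stepping range, min microcode in driver_data */
	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 4, 5, 0x02000014),
	{}
};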

View File

@@ -5371,42 +5371,32 @@ static __init void intel_clovertown_quirk(void)
x86_pmu.pebs_constraints = NULL;
}
static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_HASWELL, 3, 0x0000001f),
INTEL_CPU_DESC(INTEL_HASWELL_L, 1, 0x0000001e),
INTEL_CPU_DESC(INTEL_HASWELL_G, 1, 0x00000015),
INTEL_CPU_DESC(INTEL_HASWELL_X, 2, 0x00000037),
INTEL_CPU_DESC(INTEL_HASWELL_X, 4, 0x0000000a),
INTEL_CPU_DESC(INTEL_BROADWELL, 4, 0x00000023),
INTEL_CPU_DESC(INTEL_BROADWELL_G, 1, 0x00000014),
INTEL_CPU_DESC(INTEL_BROADWELL_D, 2, 0x00000010),
INTEL_CPU_DESC(INTEL_BROADWELL_D, 3, 0x07000009),
INTEL_CPU_DESC(INTEL_BROADWELL_D, 4, 0x0f000009),
INTEL_CPU_DESC(INTEL_BROADWELL_D, 5, 0x0e000002),
INTEL_CPU_DESC(INTEL_BROADWELL_X, 1, 0x0b000014),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 3, 0x00000021),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 4, 0x00000000),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 5, 0x00000000),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 6, 0x00000000),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 7, 0x00000000),
INTEL_CPU_DESC(INTEL_SKYLAKE_X, 11, 0x00000000),
INTEL_CPU_DESC(INTEL_SKYLAKE_L, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_SKYLAKE, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_KABYLAKE, 9, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE_L, 9, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE_L, 10, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE_L, 11, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE_L, 12, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE, 10, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE, 11, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE, 12, 0x0000004e),
INTEL_CPU_DESC(INTEL_KABYLAKE, 13, 0x0000004e),
static const struct x86_cpu_id isolation_ucodes[] = {
X86_MATCH_VFM_STEPS(INTEL_HASWELL, 3, 3, 0x0000001f),
X86_MATCH_VFM_STEPS(INTEL_HASWELL_L, 1, 1, 0x0000001e),
X86_MATCH_VFM_STEPS(INTEL_HASWELL_G, 1, 1, 0x00000015),
X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 2, 2, 0x00000037),
X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 4, 4, 0x0000000a),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL, 4, 4, 0x00000023),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_G, 1, 1, 0x00000014),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 2, 2, 0x00000010),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 3, 3, 0x07000009),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 4, 4, 0x0f000009),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 5, 5, 0x0e000002),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_X, 1, 1, 0x0b000014),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 3, 3, 0x00000021),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 4, 7, 0x00000000),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 11, 11, 0x00000000),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L, 3, 3, 0x0000007c),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE, 3, 3, 0x0000007c),
X86_MATCH_VFM_STEPS(INTEL_KABYLAKE, 9, 13, 0x0000004e),
X86_MATCH_VFM_STEPS(INTEL_KABYLAKE_L, 9, 12, 0x0000004e),
{}
};
static void intel_check_pebs_isolation(void)
{
x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
x86_pmu.pebs_no_isolation = !x86_match_min_microcode_rev(isolation_ucodes);
}
static __init void intel_pebs_isolation_quirk(void)
@@ -5416,16 +5406,16 @@ static __init void intel_pebs_isolation_quirk(void)
intel_check_pebs_isolation();
}
static const struct x86_cpu_desc pebs_ucodes[] = {
INTEL_CPU_DESC(INTEL_SANDYBRIDGE, 7, 0x00000028),
INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 6, 0x00000618),
INTEL_CPU_DESC(INTEL_SANDYBRIDGE_X, 7, 0x0000070c),
static const struct x86_cpu_id pebs_ucodes[] = {
X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE, 7, 7, 0x00000028),
X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 6, 6, 0x00000618),
X86_MATCH_VFM_STEPS(INTEL_SANDYBRIDGE_X, 7, 7, 0x0000070c),
{}
};
static bool intel_snb_pebs_broken(void)
{
return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
return !x86_match_min_microcode_rev(pebs_ucodes);
}
static void intel_snb_check_microcode(void)

View File

@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/cpuid.h>
#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
@@ -201,10 +202,10 @@ static int __init pt_pmu_hw_init(void)
* otherwise, zero for numerator stands for "not enumerated"
* as per SDM
*/
if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
if (boot_cpu_data.cpuid_level >= CPUID_LEAF_TSC) {
u32 eax, ebx, ecx, edx;
cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);
cpuid(CPUID_LEAF_TSC, &eax, &ebx, &ecx, &edx);
pt_pmu.tsc_art_num = ebx;
pt_pmu.tsc_art_den = eax;

View File

@@ -37,9 +37,6 @@ struct topa_entry {
u64 rsvd4 : 12;
};
/* TSC to Core Crystal Clock Ratio */
#define CPUID_TSC_LEAF 0x15
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];

View File

@@ -56,7 +56,6 @@
/* x86_cpu_id::flags */
#define X86_CPU_ID_FLAG_ENTRY_VALID BIT(0)
#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
/**
* X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
@@ -208,6 +207,7 @@
VFM_MODEL(vfm), \
X86_STEPPING_ANY, X86_FEATURE_ANY, data)
#define __X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
/**
* X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
* @vfm: Encoded 8-bits each for vendor, family, model
@@ -218,12 +218,13 @@
*
* feature is set to wildcard
*/
#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
VFM_VENDOR(vfm), \
VFM_FAMILY(vfm), \
VFM_MODEL(vfm), \
steppings, X86_FEATURE_ANY, data)
#define X86_MATCH_VFM_STEPS(vfm, min_step, max_step, data) \
X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
VFM_VENDOR(vfm), \
VFM_FAMILY(vfm), \
VFM_MODEL(vfm), \
__X86_STEPPINGS(min_step, max_step), \
X86_FEATURE_ANY, data)
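For reference, a hedged usage sketch of the renamed macro in a driver match table (the struct and table below are illustrative; real callers appear in the EDAC hunks further down). x86_match_cpu() returns the first matching entry, whose driver_data carries the per-model payload:

struct example_cfg {
	int channels;
};

static const struct example_cfg icx_cfg = { .channels = 8 };

static const struct x86_cpu_id example_ids[] = {
	/* match Ice Lake-SP steppings 0x0 through 0x3 only */
	X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x0, 0x3, &icx_cfg),
	{}
};

static const struct example_cfg *example_get_cfg(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_ids);

	return id ? (const struct example_cfg *)id->driver_data : NULL;
}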
/**
* X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
@@ -242,41 +243,7 @@
VFM_MODEL(vfm), \
X86_STEPPING_ANY, feature, data)
/*
* Match specific microcode revisions.
*
* vendor/family/model/stepping must be all set.
*
* Only checks against the boot CPU. When mixed-stepping configs are
* valid for a CPU model, add a quirk for every valid stepping and
* do the fine-tuning in the quirk handler.
*/
struct x86_cpu_desc {
u8 x86_family;
u8 x86_vendor;
u8 x86_model;
u8 x86_stepping;
u32 x86_microcode_rev;
};
#define INTEL_CPU_DESC(vfm, stepping, revision) { \
.x86_family = VFM_FAMILY(vfm), \
.x86_vendor = VFM_VENDOR(vfm), \
.x86_model = VFM_MODEL(vfm), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
#define AMD_CPU_DESC(fam, model, stepping, revision) { \
.x86_family = (fam), \
.x86_vendor = X86_VENDOR_AMD, \
.x86_model = (model), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
extern bool x86_match_min_microcode_rev(const struct x86_cpu_id *table);
#endif /* _ASM_X86_CPU_DEVICE_ID */

View File

@@ -132,11 +132,12 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
x86_this_cpu_test_bit(bit, cpu_info.x86_capability))
/*
* This macro is for detection of features which need kernel
* infrastructure to be used. It may *not* directly test the CPU
* itself. Use the cpu_has() family if you want true runtime
* testing of CPU features, like in hypervisor code where you are
* supporting a possible guest feature where host support for it
* This is the default CPU features testing macro to use in code.
*
* It is for detection of features which need kernel infrastructure to be
* used. It may *not* directly test the CPU itself. Use the cpu_has() family
* if you want true runtime testing of CPU features, like in hypervisor code
* where you are supporting a possible guest feature where host support for it
* is not relevant.
*/
#define cpu_feature_enabled(bit) \
@@ -161,13 +162,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
/*
* Static testing of CPU features. Used the same as boot_cpu_has(). It
* statically patches the target code for additional performance. Use
* static_cpu_has() only in fast paths, where every cycle counts. Which
* means that the boot_cpu_has() variant is already fast enough for the
* majority of cases and you should stick to using it as it is generally
* only two instructions: a RIP-relative MOV and a TEST.
*
* Do not use an "m" constraint for [cap_byte] here: gcc doesn't know
* that this is only used on a fallback path and will sometimes cause
* it to manifest the address of boot_cpu_data in a register, fouling
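In line with the rewritten comment above, a short sketch of the now-documented default (boot_cpu_has()/static_cpu_has() remain for the special cases the remaining comment text describes; the caller below is hypothetical):

/* default: compile-time patched where possible, correct everywhere */
if (cpu_feature_enabled(X86_FEATURE_XSAVE))
	do_xsave_setup();	/* hypothetical caller */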

View File

@@ -83,8 +83,8 @@
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
/* Free ( 3*32+ 6) */
/* Free ( 3*32+ 7) */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
#define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */

View File

@@ -21,6 +21,13 @@ enum cpuid_regs_idx {
CPUID_EDX,
};
#define CPUID_LEAF_MWAIT 0x5
#define CPUID_LEAF_DCA 0x9
#define CPUID_LEAF_XSTATE 0x0d
#define CPUID_LEAF_TSC 0x15
#define CPUID_LEAF_FREQ 0x16
#define CPUID_LEAF_TILE 0x1d
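These named constants replace the scattered magic numbers (0x5, 0x9, 0x0d, 0x15, 0x16, 0x1d) fixed up throughout the rest of this merge. A hedged sketch of the usual guarded-read pattern, mirroring native_calibrate_tsc() below:

static u32 example_tsc_ratio_numerator(void)
{
	u32 eax, ebx, ecx, edx;

	/* leaf 0x15 is only valid if the max basic leaf reaches it */
	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
		return 0;

	cpuid(CPUID_LEAF_TSC, &eax, &ebx, &ecx, &edx);
	return ebx;	/* zero means "not enumerated", per the SDM */
}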
#ifdef CONFIG_X86_32
bool have_cpuid_p(void);
#else

View File

@@ -12,10 +12,6 @@
/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
#define XSTATE_CPUID 0x0000000d
#define TILE_CPUID 0x0000001d
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64

View File

@@ -15,7 +15,6 @@
#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
#define MWAIT_C1_SUBSTATE_MASK 0xf0
#define CPUID_MWAIT_LEAF 5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK 0x2
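The two ECX flags kept here are what every MWAIT-for-idle user checks after reading the (now shared) leaf; a sketch condensing the pattern used by intel_idle, the ACPI cstate code and the idle paths below:

static bool example_mwait_idle_usable(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);

	/* need enumerated extensions plus break-on-interrupt for idle */
	return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
	       (ecx & CPUID5_ECX_INTERRUPT_BREAK);
}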

View File

@@ -180,13 +180,6 @@ static inline void halt(void)
PVOP_VCALL0(irq.halt);
}
extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)
{
PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
}
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);

View File

@@ -86,8 +86,6 @@ struct pv_cpu_ops {
void (*update_io_bitmap)(void);
#endif
void (*wbinvd)(void);
/* cpuid emulation, mostly so that caps bits can be disabled */
void (*cpuid)(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);

View File

@@ -115,7 +115,7 @@ static inline void wrpkru(u32 pkru)
}
#endif
static __always_inline void native_wbinvd(void)
static __always_inline void wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
@@ -167,12 +167,6 @@ static inline void __write_cr4(unsigned long x)
{
native_write_cr4(x);
}
static __always_inline void wbinvd(void)
{
native_wbinvd();
}
#endif /* CONFIG_PARAVIRT_XXL */
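With the CONFIG_PARAVIRT_XXL wrapper above deleted, wbinvd() is this single native definition in every configuration, which is why the resctrl, process and Xen hunks below drop native_wbinvd()/pv_native_wbinvd(). A trivial caller sketch:

/* sketch: write back and invalidate all caches; the asm's "memory"
 * clobber keeps the compiler from reordering accesses around it */
static void example_flush_caches(void)
{
	wbinvd();
}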
static __always_inline void clflush(volatile void *__p)

View File

@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <acpi/processor.h>
#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>
@@ -128,7 +129,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
unsigned int cstate_type; /* C-state type and not ACPI C-state type */
unsigned int num_cstate_subtype;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
/* Check whether this particular cx_type (in CST) is supported or not */
cstate_type = (((cx->address >> MWAIT_SUBSTATE_SIZE) &
@@ -172,7 +173,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct cpuinfo_x86 *c = &cpu_data(cpu);
long retval;
if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
if (!cpu_cstate_entry || c->cpuid_level < CPUID_LEAF_MWAIT)
return -1;
if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)

View File

@@ -509,19 +509,19 @@ static struct clock_event_device lapic_clockevent = {
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static const struct x86_cpu_id deadline_match[] __initconst = {
X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
X86_MATCH_VFM_STEPPINGS(INTEL_HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x2, 0x2, 0x3a), /* EP */
X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x4, 0x4, 0x0f), /* EX */
X86_MATCH_VFM(INTEL_BROADWELL_X, 0x0b000020),
X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
X86_MATCH_VFM_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x2, 0x2, 0x00000011),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x3, 0x3, 0x0700000e),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x4, 0x4, 0x0f00000c),
X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x5, 0x5, 0x0e000003),
X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x3, 0x3, 0x01000136),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x4, 0x4, 0x02000014),
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x5, 0xf, 0),
X86_MATCH_VFM(INTEL_HASWELL, 0x22),
X86_MATCH_VFM(INTEL_HASWELL_L, 0x20),

View File

@@ -795,10 +795,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
clear_rdrand_cpuid_bit(c);
}
static const struct x86_cpu_desc erratum_1386_microcode[] = {
AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
{},
static const struct x86_cpu_id erratum_1386_microcode[] = {
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
{}
};
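VFM_MAKE() is what lets a non-Intel table encode vendor/family/model inline; note also that x86_match_cpu() walks a table until the terminating empty entry, which is why the '{}' terminator above matters. A hedged re-statement of one entry for illustration:

static const struct x86_cpu_id example_amd_ucode[] = {
	/* Zen2 (family 0x17, model 0x31), stepping 0 only, min rev 0x08301052 */
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31),
			    0x0, 0x0, 0x08301052),
	{}	/* terminator */
};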
static void fix_erratum_1386(struct cpuinfo_x86 *c)
@@ -814,7 +813,7 @@ static void fix_erratum_1386(struct cpuinfo_x86 *c)
* Clear the feature flag only on microcode revisions which
* don't have the fix.
*/
if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode))
if (x86_match_min_microcode_rev(erratum_1386_microcode))
return;
clear_cpu_cap(c, X86_FEATURE_XSAVES);

View File

@@ -29,6 +29,7 @@
#include <asm/alternative.h>
#include <asm/cmdline.h>
#include <asm/cpuid.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
@@ -636,9 +637,9 @@ struct cpuid_dependent_feature
static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
{ X86_FEATURE_MWAIT, 0x00000005 },
{ X86_FEATURE_DCA, 0x00000009 },
{ X86_FEATURE_XSAVE, 0x0000000d },
{ X86_FEATURE_MWAIT, CPUID_LEAF_MWAIT },
{ X86_FEATURE_DCA, CPUID_LEAF_DCA },
{ X86_FEATURE_XSAVE, CPUID_LEAF_XSTATE },
{ 0, 0 }
};
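Each feature is now tied to its named leaf. For context, a simplified, hedged sketch of the (pre-existing) consumer logic that clears a feature when the CPU's maximum basic leaf is below the one enumerating it:

static void example_filter_cpuid_features(struct cpuinfo_x86 *c)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {
		if (cpu_has(c, df->feature) && c->cpuid_level < df->level)
			clear_cpu_cap(c, df->feature);
	}
}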
@@ -1201,8 +1202,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define VULNBL(vendor, family, model, blacklist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues) \
X86_MATCH_VFM_STEPPINGS(vfm, steppings, issues)
#define VULNBL_INTEL_STEPS(vfm, max_stepping, issues) \
X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
#define VULNBL_AMD(family, blacklist) \
VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
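VULNBL_INTEL_STEPS() now means "steppings X86_STEP_MIN through max_stepping"; X86_STEP_MIN/X86_STEP_MAX (0x0/0xf, added to mod_devicetable.h in the last hunk of this merge) bound the 4-bit stepping field, so passing X86_STEP_MAX matches every stepping. An equivalence sketch (table-entry macros shown outside a table for clarity):

/* these two expand to the same match: steppings 0x0..0xf of Skylake */
VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS)
X86_MATCH_VFM_STEPS(INTEL_SKYLAKE, X86_STEP_MIN, X86_STEP_MAX,
		    MMIO | RETBLEED | GDS | SRBDS)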
@@ -1227,43 +1228,43 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define RFDS BIT(7)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_HASWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_HASWELL, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_HASWELL_L, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_HASWELL_G, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_HASWELL_X, X86_STEP_MAX, MMIO),
VULNBL_INTEL_STEPS(INTEL_BROADWELL_D, X86_STEP_MAX, MMIO),
VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO),
VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS),
VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS),
VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED),
VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS),
VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS),
VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPS(INTEL_ALDERLAKE, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MAX, MMIO | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX, RFDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),

View File

@@ -599,11 +599,6 @@ static void init_intel(struct cpuinfo_x86 *c)
if (p)
strcpy(c->x86_model_id, p);
}
if (c->x86 == 15)
set_cpu_cap(c, X86_FEATURE_P4);
if (c->x86 == 6)
set_cpu_cap(c, X86_FEATURE_P3);
#endif
/* Work around errata */

View File

@@ -56,33 +56,13 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
}
EXPORT_SYMBOL(x86_match_cpu);
static const struct x86_cpu_desc *
x86_match_cpu_with_stepping(const struct x86_cpu_desc *match)
bool x86_match_min_microcode_rev(const struct x86_cpu_id *table)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
const struct x86_cpu_desc *m;
const struct x86_cpu_id *res = x86_match_cpu(table);
for (m = match; m->x86_family | m->x86_model; m++) {
if (c->x86_vendor != m->x86_vendor)
continue;
if (c->x86 != m->x86_family)
continue;
if (c->x86_model != m->x86_model)
continue;
if (c->x86_stepping != m->x86_stepping)
continue;
return m;
}
return NULL;
}
bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
{
const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table);
if (!res || res->x86_microcode_rev > boot_cpu_data.microcode)
if (!res || res->driver_data > boot_cpu_data.microcode)
return false;
return true;
}
EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);
EXPORT_SYMBOL_GPL(x86_match_min_microcode_rev);
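The replacement helper leans on x86_match_cpu() and then compares the matched entry's driver_data against the boot CPU's loaded microcode revision. A hedged usage sketch:

static void example_apply_errata(void)
{
	static const struct x86_cpu_id fixed_ucodes[] = {
		/* illustrative entry: Skylake-L stepping 3, fixed in rev 0x7c */
		X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_L, 0x3, 0x3, 0x0000007c),
		{}
	};

	if (x86_match_min_microcode_rev(fixed_ucodes))
		return;	/* loaded microcode already has the fix */

	/* ... apply the software workaround ... */
}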

View File

@@ -459,7 +459,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
* increase likelihood that allocated cache portion will be filled
* with associated memory.
*/
native_wbinvd();
wbinvd();
/*
* Always called with interrupts enabled. By disabling interrupts

View File

@@ -20,6 +20,7 @@
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>
#include <asm/cpuid.h>
#include <asm/tlbflush.h>
#include <asm/prctl.h>
#include <asm/elf.h>
@@ -232,7 +233,7 @@ static void __init setup_xstate_cache(void)
xmm_space);
for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) {
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx);
xstate_sizes[i] = eax;
xstate_flags[i] = ecx;
@@ -398,7 +399,7 @@ int xfeature_size(int xfeature_nr)
u32 eax, ebx, ecx, edx;
CHECK_XFEATURE(xfeature_nr);
cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, xfeature_nr, &eax, &ebx, &ecx, &edx);
return eax;
}
@@ -441,9 +442,9 @@ static void __init __xstate_dump_leaves(void)
* just in case there are some goodies up there
*/
for (i = 0; i < XFEATURE_MAX + 10; i++) {
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx);
pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
XSTATE_CPUID, i, eax, ebx, ecx, edx);
CPUID_LEAF_XSTATE, i, eax, ebx, ecx, edx);
}
}
@@ -484,7 +485,7 @@ static int __init check_xtile_data_against_struct(int size)
* Check the maximum palette id:
* eax: the highest numbered palette subleaf.
*/
cpuid_count(TILE_CPUID, 0, &max_palid, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_TILE, 0, &max_palid, &ebx, &ecx, &edx);
/*
* Cross-check each tile size and find the maximum number of
@@ -498,7 +499,7 @@
* eax[31:16]: bytes per tile
* ebx[31:16]: the max names (or max number of tiles)
*/
cpuid_count(TILE_CPUID, palid, &eax, &ebx, &edx, &edx);
cpuid_count(CPUID_LEAF_TILE, palid, &eax, &ebx, &edx, &edx);
tile_size = eax >> 16;
max = ebx >> 16;
@@ -633,7 +634,7 @@ static unsigned int __init get_compacted_size(void)
* are no supervisor states, but XSAVEC still uses compacted
* format.
*/
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx);
return ebx;
}
@@ -674,7 +675,7 @@ static unsigned int __init get_xsave_size_user(void)
* containing all the *user* state components
* corresponding to bits currently set in XCR0.
*/
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx);
return ebx;
}
@@ -763,21 +764,16 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
return;
}
if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
WARN_ON_FPU(1);
return;
}
/*
* Find user xstates supported by the processor.
*/
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx);
fpu_kernel_cfg.max_features = eax + ((u64)edx << 32);
/*
* Find supervisor xstates supported by the processor.
*/
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx);
fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32);
if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {

View File

@@ -7,6 +7,7 @@
#include <linux/cpu.h>
#include <linux/irq.h>
#include <asm/cpuid.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/time.h>
@@ -927,10 +928,7 @@ static bool __init mwait_pc10_supported(void)
if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
return false;
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return false;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);
return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
(ecx & CPUID5_ECX_INTERRUPT_BREAK) &&

View File

@@ -116,11 +116,6 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
native_set_debugreg(regno, val);
}
noinstr void pv_native_wbinvd(void)
{
native_wbinvd();
}
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
@@ -148,7 +143,6 @@ struct paravirt_patch_template pv_ops = {
.cpu.read_cr0 = native_read_cr0,
.cpu.write_cr0 = native_write_cr0,
.cpu.write_cr4 = native_write_cr4,
.cpu.wbinvd = pv_native_wbinvd,
.cpu.read_msr = native_read_msr,
.cpu.write_msr = native_write_msr,
.cpu.read_msr_safe = native_read_msr_safe,

View File

@@ -30,6 +30,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <asm/cpu.h>
#include <asm/cpuid.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
@@ -825,7 +826,7 @@ void __noreturn stop_this_cpu(void *dummy)
* X86_FEATURE_SME due to cmdline options.
*/
if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
native_wbinvd();
wbinvd();
/*
* This brings a cache line back and dirties it, but
@@ -846,7 +847,7 @@
/*
* Use native_halt() so that memory contents don't change
* (stack usage and variables) after possibly issuing the
* native_wbinvd() above.
* wbinvd() above.
*/
native_halt();
}
@@ -877,7 +878,7 @@ static __init bool prefer_mwait_c1_over_halt(void)
if (boot_cpu_has_bug(X86_BUG_MONITOR) || boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
return false;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
/*
* If MWAIT extensions are not available, it is safe to use MWAIT

View File

@@ -64,6 +64,7 @@
#include <asm/acpi.h>
#include <asm/cacheinfo.h>
#include <asm/cpuid.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
@@ -1291,10 +1292,8 @@ static inline void mwait_play_dead(void)
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
return;
if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
return;
eax = CPUID_MWAIT_LEAF;
eax = CPUID_LEAF_MWAIT;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);

View File

@@ -16,6 +16,7 @@
#include <linux/static_key.h>
#include <linux/static_call.h>
#include <asm/cpuid.h>
#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
@@ -665,13 +666,13 @@ unsigned long native_calibrate_tsc(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
if (boot_cpu_data.cpuid_level < 0x15)
if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;
@@ -680,8 +681,8 @@ unsigned long native_calibrate_tsc(void)
/*
* Denverton SoCs don't report crystal clock, and also don't support
* CPUID.0x16 for the calculation below, so hardcode the 25MHz crystal
* clock.
* CPUID_LEAF_FREQ for the calculation below, so hardcode the 25MHz
* crystal clock.
*/
if (crystal_khz == 0 &&
boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
@@ -700,10 +701,10 @@ unsigned long native_calibrate_tsc(void)
* clock, but we can easily calculate it to a high degree of accuracy
* by considering the crystal ratio and the CPU speed.
*/
if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= 0x16) {
if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= CPUID_LEAF_FREQ) {
unsigned int eax_base_mhz, ebx, ecx, edx;
cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx, &ecx, &edx);
crystal_khz = eax_base_mhz * 1000 *
eax_denominator / ebx_numerator;
}
@@ -738,12 +739,12 @@ static unsigned long cpu_khz_from_cpuid(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
if (boot_cpu_data.cpuid_level < 0x16)
if (boot_cpu_data.cpuid_level < CPUID_LEAF_FREQ)
return 0;
eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
return eax_base_mhz * 1000;
}
@@ -1067,10 +1068,8 @@ core_initcall(cpufreq_register_tsc_scaling);
#endif /* CONFIG_CPU_FREQ */
#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)
/*
* If ART is present detect the numerator:denominator to convert to TSC
*/
@@ -1078,7 +1077,7 @@ static void __init detect_art(void)
{
unsigned int unused;
if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return;
/*
@@ -1091,7 +1090,7 @@ static void __init detect_art(void)
tsc_async_resets)
return;
cpuid(ART_CPUID_LEAF, &art_base_clk.denominator,
cpuid(CPUID_LEAF_TSC, &art_base_clk.denominator,
&art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
art_base_clk.freq_khz /= KHZ;
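detect_art() now reads leaf 0x15 through the shared CPUID_LEAF_TSC definition. For reference, the relationship the leaf encodes is TSC = ART * (EBX/EAX), with EAX != 0; a hedged helper sketch (div_u64() from <linux/math64.h> assumed):

static u64 example_art_to_tsc(u64 art, u32 numerator, u32 denominator)
{
	/* CPUID.15H: EBX = numerator, EAX = denominator of the TSC/ART ratio */
	return div_u64(art * numerator, denominator);
}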

View File

@@ -49,6 +49,7 @@
#include <xen/hvc-console.h>
#include <xen/acpi.h>
#include <asm/cpuid.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
@@ -230,7 +231,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
or_ebx = smp_processor_id() << 24;
break;
case CPUID_MWAIT_LEAF:
case CPUID_LEAF_MWAIT:
/* Synthesize the values.. */
*ax = 0;
*bx = 0;
@@ -300,7 +301,7 @@ static bool __init xen_check_mwait(void)
* ecx and edx. The hypercall provides only partial information.
*/
ax = CPUID_MWAIT_LEAF;
ax = CPUID_LEAF_MWAIT;
bx = 0;
cx = 0;
dx = 0;
@@ -1161,8 +1162,6 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
.write_cr4 = xen_write_cr4,
.wbinvd = pv_native_wbinvd,
.read_msr = xen_read_msr,
.write_msr = xen_write_msr,

View File

@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -46,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))

View File

@@ -10,6 +10,8 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
#include <asm/cpuid.h>
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
@@ -58,11 +60,11 @@
{
/* CPUID level 9 returns DCA configuration */
/* Bit 0 indicates DCA enabled by the BIOS */
unsigned long cpuid_level_9;
u32 eax;
int res;
cpuid_level_9 = cpuid_eax(9);
res = test_bit(0, &cpuid_level_9);
eax = cpuid_eax(CPUID_LEAF_DCA);
res = eax & BIT(0);
if (!res)
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");

View File

@@ -938,16 +938,17 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
X86_MATCH_VFM(INTEL_ICELAKE_D, &i10nm_cfg1),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

View File

@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);

View File

@@ -51,6 +51,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
@@ -2316,10 +2317,7 @@ static int __init intel_idle_init(void)
return -ENODEV;
}
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||

View File

@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <linux/units.h>
#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
@@ -935,13 +936,13 @@ static unsigned int pmc_core_get_crystal_freq(void)
{
unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
if (boot_cpu_data.cpuid_level < 0x15)
if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
return 0;
eax_denominator = ebx_numerator = ecx_hz = edx = 0;
/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
/* TSC/Crystal ratio, plus optionally Crystal Hz */
cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
if (ebx_numerator == 0 || eax_denominator == 0)
return 0;

View File

@@ -700,6 +700,8 @@ struct x86_cpu_id {
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
#define X86_STEPPING_ANY 0
#define X86_STEP_MIN 0
#define X86_STEP_MAX 0xf
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*